This article collects typical usage examples of the `div` function from `tensorflow.python.ops.math_ops` in Python. If you have been wondering what exactly `math_ops.div` does and how to use it, the curated examples below should help. A total of 15 code examples of the `div` function are shown, ordered by popularity.
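For orientation before the examples: `math_ops.div` is the internal TensorFlow op behind elementwise tensor division. In user code, the public `tf.math.divide` (or the `/` operator) is the usual entry point. A minimal sketch, assuming TensorFlow 2.x with eager execution:

import tensorflow as tf

x = tf.constant([6.0, 9.0, 12.0])
y = tf.constant([2.0, 3.0, 4.0])
print(tf.math.divide(x, y).numpy())  # [3. 3. 3.]
print((x / y).numpy())               # same result via the overloaded operator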
Example 1: _safe_div
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if isinstance(denominator, float):
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  if context.in_eager_mode() and denominator._rank() == 0:  # pylint: disable=protected-access
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
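The behavior `_safe_div` implements (0 wherever the denominator is 0, instead of Inf or NaN) is available in the public API as `tf.math.divide_no_nan`; a minimal sketch with made-up values, assuming TensorFlow 2.x:

import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])
print(tf.math.divide_no_nan(num, den).numpy())  # [0.5  0.   0.75]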
Example 2: testAssertDivideByZero
def testAssertDivideByZero(self):
  with self.test_session() as sess:
    epsilon = ops.convert_to_tensor(1e-20)
    x = ops.convert_to_tensor(0.0)
    y = ops.convert_to_tensor(1.0)
    z = ops.convert_to_tensor(2.0)
    # assert(epsilon < y)
    # z / y
    with sess.graph.control_dependencies([
        control_flow_ops.Assert(
            math_ops.less(epsilon, y), ["Divide-by-zero"])
    ]):
      out = math_ops.div(z, y)
    self.assertAllEqual(2.0, out.eval())
    # assert(epsilon < x)
    # z / x
    #
    # This tests printing out multiple tensors
    with sess.graph.control_dependencies([
        control_flow_ops.Assert(
            math_ops.less(epsilon, x), ["Divide-by-zero", "less than x"])
    ]):
      out = math_ops.div(z, x)
    with self.assertRaisesOpError("less than x"):
      out.eval()
Example 3: compute_best_f1_score
def compute_best_f1_score(tp, fp, fn, name):
  # NOTE: `epsilon` is a free variable here; in the source this snippet was
  # taken from, it is a small constant supplied by the enclosing scope to
  # guard against division by zero.
  precision_at_t = math_ops.div(tp, epsilon + tp + fp,
                                name='precision_' + name)
  recall_at_t = math_ops.div(tp, epsilon + tp + fn, name='recall_' + name)
  # Compute F1 score.
  f1_at_thresholds = (
      2.0 * precision_at_t * recall_at_t /
      (precision_at_t + recall_at_t + epsilon))
  return math_ops.reduce_max(f1_at_thresholds)
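To sanity-check the formula above on plain numbers, here is the same computation in pure Python with hypothetical counts; `epsilon` plays the same division-guard role:

tp, fp, fn = 8.0, 2.0, 4.0
epsilon = 1e-7
precision = tp / (epsilon + tp + fp)  # ~= 0.8
recall = tp / (epsilon + tp + fn)     # ~= 0.6667
f1 = 2.0 * precision * recall / (precision + recall + epsilon)
print(round(f1, 4))                   # ~= 0.7273, i.e. 2PR / (P + R)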
Example 4: inference_graph
def inference_graph(self, input_data, **inference_args):
  """Constructs a TF graph for evaluating a random forest.

  Args:
    input_data: A tensor or dict of string->Tensor for the input data.
      This input_data must generate the same spec as the input_data used
      in training_graph: the dict must have the same keys, for example,
      and all tensors must have the same size in their first dimension.
    **inference_args: Keyword arguments to pass through to each tree.

  Returns:
    A tuple of (probabilities, tree_paths, variance), where variance is
    the variance over all the trees for regression problems only.

  Raises:
    NotImplementedError: If trying to use feature bagging with sparse
      features.
  """
  processed_dense_features, processed_sparse_features, data_spec = (
      data_ops.ParseDataTensorOrDict(input_data))

  probabilities = []
  paths = []
  for i in range(self.params.num_trees):
    with ops.device(self.variables.device_dummies[i].device):
      tree_data = processed_dense_features
      if self.params.bagged_features:
        if processed_sparse_features is not None:
          raise NotImplementedError(
              'Feature bagging not supported with sparse features.')
        tree_data = self._bag_features(i, tree_data)
      probs, path = self.trees[i].inference_graph(
          tree_data,
          data_spec,
          sparse_features=processed_sparse_features,
          **inference_args)
      probabilities.append(probs)
      paths.append(path)

  with ops.device(self.variables.device_dummies[0].device):
    # shape of all_predict should be [batch_size, num_trees, num_outputs]
    all_predict = array_ops.stack(probabilities, axis=1)
    average_values = math_ops.div(
        math_ops.reduce_sum(all_predict, 1),
        self.params.num_trees,
        name='probabilities')
    tree_paths = array_ops.stack(paths, axis=1)
    regression_variance = None
    if self.params.regression:
      expected_squares = math_ops.div(
          math_ops.reduce_sum(all_predict * all_predict, 1),
          self.params.num_trees)
      regression_variance = math_ops.maximum(
          0., expected_squares - average_values * average_values)
    return average_values, tree_paths, regression_variance
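The regression variance uses the identity Var[X] = E[X^2] - (E[X])^2 across trees; a NumPy sketch with hypothetical per-example predictions from three trees:

import numpy as np

preds = np.array([0.2, 0.5, 0.8])                 # one example, num_trees = 3
mean = preds.sum() / len(preds)                   # average_values
expected_squares = (preds * preds).sum() / len(preds)
variance = max(0.0, expected_squares - mean * mean)
print(mean, variance)                             # 0.5, ~0.06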
Example 5: _DivGrad
def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx),
                            sx),
          array_ops.reshape(
              math_ops.reduce_sum(grad * math_ops.div(math_ops.div(-x, y), y),
                                  ry), sy))
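The two reshaped sums implement the quotient rule: d(x/y)/dx = 1/y and d(x/y)/dy = -x/y**2, each reduced over the broadcast dimensions rx/ry. A quick numeric check through the public API, as a sketch assuming TensorFlow 2.x:

import tensorflow as tf

x = tf.constant(6.0)
y = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch([x, y])
  z = x / y
dx, dy = tape.gradient(z, [x, y])
print(dx.numpy(), dy.numpy())  # 1/y ~= 0.3333, -x/y**2 ~= -0.6667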
Example 6: _update_mask
def _update_mask(self, weights, threshold):
  """Updates the mask for a given weight tensor.

  This function first computes the cdf of the weight tensor, and estimates
  the threshold value such that 'desired_sparsity' fraction of weights
  have magnitude less than the threshold.

  Args:
    weights: The weight tensor that needs to be masked.
    threshold: The current threshold value. The function will compute a new
      threshold and return the exponential moving average using the current
      value of threshold.

  Returns:
    new_threshold: The new value of the threshold based on weights, and
      sparsity at the current global_step.
    new_mask: A numpy array of the same size and shape as weights containing
      0 or 1 to indicate which of the values in weights falls below
      the threshold.

  Raises:
    ValueError: if sparsity is not defined
  """
  if self._sparsity is None:
    raise ValueError('Sparsity variable undefined')

  with ops.name_scope(weights.op.name + '_pruning_ops'):
    abs_weights = math_ops.abs(weights)
    max_value = math_ops.reduce_max(abs_weights)
    histogram = _histogram(
        abs_weights, [0.0, max_value],
        nbins=self._spec.nbins,
        dtype=np.float32)

    cdf = math_ops.cumsum(histogram)
    norm_cdf = math_ops.div(cdf, math_ops.reduce_sum(histogram))
    current_threshold = math_ops.multiply(
        math_ops.div(
            math_ops.reduce_sum(
                math_ops.cast(
                    math_ops.less(norm_cdf, self._sparsity), np.float32)),
            float(self._spec.nbins)), max_value)

    smoothed_threshold = math_ops.add_n([
        math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
        math_ops.multiply(threshold, self._spec.threshold_decay)
    ])
    new_mask = math_ops.cast(
        math_ops.greater(abs_weights, smoothed_threshold), np.float32)
  return smoothed_threshold, new_mask
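The CDF-based threshold estimate is easy to replay in NumPy. A sketch with hypothetical weights, 4 histogram bins, and a target sparsity of 0.7 (the real code uses `self._spec.nbins` bins and then smooths the threshold exponentially):

import numpy as np

weights = np.array([-0.05, 0.2, -0.4, 0.9, 0.1, -0.6])
sparsity = 0.7                        # desired fraction of zeroed weights
abs_w = np.abs(weights)
max_value = abs_w.max()
hist, _ = np.histogram(abs_w, bins=4, range=(0.0, max_value))
norm_cdf = np.cumsum(hist) / hist.sum()
# Fraction of bins whose cumulative mass is still below the target sparsity:
frac = (norm_cdf < sparsity).sum() / 4.0
threshold = frac * max_value
mask = (abs_w > threshold).astype(np.float32)
print(threshold, mask)                # 0.45 [0. 0. 0. 1. 0. 1.]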
Example 7: _DivGrad
def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx),
                            sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.div(-x, math_ops.square(y)), ry), sy))
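This variant writes the y-gradient as grad * (-x / y**2) rather than Example 5's grad * ((-x / y) / y); the two are algebraically identical, as a one-line check with hypothetical scalars shows:

x, y, grad = 6.0, 3.0, 1.0
assert abs(grad * (-x / y) / y - grad * (-x / y ** 2)) < 1e-12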
Example 8: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  # TODO(aaroey): test graph with different dtypes.
  dtype = dtypes.float32
  input_name = "input"
  input_dims = [100, 24, 24, 2]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
    with g.device("/GPU:0"):
      conv_filter = constant_op.constant(
          [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=inp,
          filter=conv_filter,
          strides=[1, 2, 2, 1],
          padding="SAME",
          name="conv")
      c1 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c1")
      p = math_ops.mul(conv, c1, name="mul")
      c2 = constant_op.constant(
          np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype, name="c2")
      q = math_ops.div(conv, c2, name="div")

      edge = self.trt_incompatible_op(q, name="incompatible")
      edge = math_ops.div(edge, edge, name="div1")
      r = math_ops.add(edge, edge, name="add")

      p = math_ops.sub(p, edge, name="sub")
      q = math_ops.mul(q, edge, name="mul1")
      s = math_ops.add(p, q, name="add1")
      s = math_ops.sub(s, r, name="sub1")
      array_ops.squeeze(s, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      # TODO(aaroey): LayoutOptimizer adds additional nodes to the graph
      # which breaks the connection check, fix it.
      # - my_trt_op_0 should have ["mul", "sub", "div1", "mul1", "add1",
      #   "add", "sub1"];
      # - my_trt_op_1 should have ["weights", "conv", "div"]
      expected_engines=["my_trt_op_0", "my_trt_op_1"],
      expected_output_dims=(100, 12, 12, 6),
      allclose_atol=1.e-03,
      allclose_rtol=1.e-03)
Example 9: _safe_div
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if compat.forward_compatible(2018, 11, 1):
    return math_ops.div_no_nan(numerator, denominator, name=name)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator,
                   array_ops.where(
                       math_ops.equal(denominator, 0),
                       array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Example 10: accuracy
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
      if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.mul(is_correct, weights)
      num_values = math_ops.mul(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
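A NumPy rendering of the weighted branch, with hypothetical predictions, labels, and weights:

import numpy as np

predictions = np.array([1, 0, 1, 1])
labels = np.array([1, 1, 1, 0])
weights = np.array([1.0, 1.0, 2.0, 0.5])
is_correct = (predictions == labels).astype(np.float32) * weights
num_values = weights * np.ones_like(is_correct)
print(is_correct.sum() / num_values.sum())  # (1 + 2) / 4.5 ~= 0.6667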
Example 11: squared_loss
def squared_loss(predicted, target, name=None):
  """Computes and returns the per-example squared loss, divided by 2.

  Computes the per-example squared difference between the target and
  predicted tensors. The tensors must have the same shape.

  Args:
    predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
      of predicted values.
    target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
      target values. The shape of the target tensor should match the
      `predicted` tensor.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size, dim_1, ..., dim_n]` tensor of per-example squared losses.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.
  """
  with ops.op_scope([predicted, target], name, "squared_loss") as scope:
    predicted = ops.convert_to_tensor(predicted, name="predicted")
    target = ops.convert_to_tensor(target, name="target")
    _validate_predicted_and_target(predicted, target)
    return math_ops.div(math_ops.square(target - predicted), 2.0, name=scope)
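The division by 2 is the usual least-squares convention: it makes the gradient of the summed loss with respect to `predicted` exactly `predicted - target`. A quick check through the public API, as a sketch assuming TensorFlow 2.x:

import tensorflow as tf

predicted = tf.constant([2.0, 5.0])
target = tf.constant([1.0, 3.0])
with tf.GradientTape() as tape:
  tape.watch(predicted)
  loss = tf.reduce_sum(tf.square(target - predicted) / 2.0)
print(tape.gradient(loss, predicted).numpy())  # [1. 2.] == predicted - target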
Example 12: weighted_moving_average
def weighted_moving_average(value,
                            decay,
                            weight,
                            truediv=True,
                            collections=None,
                            name=None):
  """Compute the weighted moving average of `value`.

  Conceptually, the weighted moving average is:
    `moving_average(value * weight) / moving_average(weight)`,
  where a moving average updates by the rule
    `new_value = decay * old_value + (1 - decay) * update`
  Internally, this Op keeps moving average variables of both `value * weight`
  and `weight`.

  Args:
    value: A numeric `Tensor`.
    decay: A float `Tensor` or float value. The moving average decay.
    weight: `Tensor` that keeps the current value of a weight. Shape should
      be able to multiply `value`.
    truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
      floating point division. If `False`, use division implied by dtypes.
    collections: List of graph collections keys to add the internal variables
      `value * weight` and `weight` to. Defaults to
      `[GraphKeys.GLOBAL_VARIABLES]`.
    name: Optional name of the returned operation. Defaults to
      "WeightedMovingAvg".

  Returns:
    An Operation that updates and returns the weighted moving average.
  """
  # Unlike assign_moving_average, the weighted moving average doesn't modify
  # user-visible variables. It is the ratio of two internal variables, which
  # are moving averages of the updates. Thus, the signature of this function
  # is quite different than assign_moving_average.
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  with variable_scope.variable_scope(name, "WeightedMovingAvg",
                                     [value, weight, decay]) as scope:
    value_x_weight_var = variable_scope.get_variable(
        "value_x_weight",
        initializer=init_ops.zeros_initializer(value.get_shape(),
                                               dtype=value.dtype),
        trainable=False,
        collections=collections)
    weight_var = variable_scope.get_variable(
        "weight",
        initializer=init_ops.zeros_initializer(weight.get_shape(),
                                               dtype=weight.dtype),
        trainable=False,
        collections=collections)
    numerator = assign_moving_average(
        value_x_weight_var, value * weight, decay, zero_debias=False)
    denominator = assign_moving_average(
        weight_var, weight, decay, zero_debias=False)
    if truediv:
      return math_ops.truediv(numerator, denominator, name=scope.name)
    else:
      return math_ops.div(numerator, denominator, name=scope.name)
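The ratio-of-moving-averages idea is easy to simulate with scalars. A two-step sketch with a hypothetical decay and (value, weight) stream, using zero-initialized accumulators as in the snippet (`zero_debias=False`):

decay = 0.9
num = den = 0.0  # value_x_weight_var, weight_var
for value, weight in [(10.0, 1.0), (20.0, 3.0)]:
  num = decay * num + (1 - decay) * value * weight
  den = decay * den + (1 - decay) * weight
print(num / den)  # ~= 17.69; a weight-biased average of 10.0 and 20.0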
Example 13: setUp
def setUp(self):
  self.a = variables.VariableV1(2.0, name="a")
  self.b = variables.VariableV1(3.0, name="b")

  self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
  self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.

  self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

  self.f_y = constant_op.constant(0.30, name="f_y")
  self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

  # The three nodes x, y and z form a graph with "cross-links" in it: x
  # and y are both direct inputs to z, but x is also a direct input to y.
  self.x = variables.VariableV1(2.0, name="x")  # Should be 2.0.
  self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
  self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

  rewriter_config = rewriter_config_pb2.RewriterConfig(
      disable_model_pruning=True,
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
      constant_folding=rewriter_config_pb2.RewriterConfig.OFF)
  graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  self.sess = session.Session(config=config)
  self.sess.run(variables.global_variables_initializer())
Example 14: loss
def loss(self, logits, target, features):
  """Returns loss tensor for this head.

  The loss returned is the weighted average:
    L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}

  Args:
    logits: logits, a float tensor.
    target: either a tensor for labels or in multihead case, a dict of
      string to target tensor.
    features: features dict.

  Returns:
    Loss tensor.
  """
  target = target[self.name] if isinstance(target, dict) else target
  loss_unweighted = self._loss_fn(logits, target)

  weight_tensor = self.get_weight_tensor(features)
  if weight_tensor is None:
    return math_ops.reduce_mean(loss_unweighted, name="loss")
  loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
  return math_ops.div(
      math_ops.reduce_sum(loss_weighted),
      math_ops.cast(math_ops.reduce_sum(weight_tensor), dtypes.float32),
      name="loss")
Example 15: loss
def loss(self, logits, target, features):
  """Returns loss tensor for this head.

  Args:
    logits: logits, a float tensor.
    target: either a tensor for labels or in multihead case, a dict of
      string to target tensor.
    features: features dict.

  Returns:
    Loss tensor.
  """
  target = target[self.name] if isinstance(target, dict) else target
  loss_unweighted = self._loss_fn(logits, target)

  weight_tensor = self.get_weight_tensor(features)
  if weight_tensor is None:
    return math_ops.reduce_mean(loss_unweighted, name="loss")
  else:
    loss_unweighted = array_ops.reshape(loss_unweighted, shape=(-1,))
    loss_weighted = math_ops.mul(
        loss_unweighted, array_ops.reshape(weight_tensor, shape=(-1,)))
    return math_ops.div(
        math_ops.reduce_sum(loss_weighted),
        math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
        name="loss")