This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.greater. If you are wondering what math_ops.greater does, how to call it, or what idiomatic usage looks like, the curated examples below should help.
The following shows 15 code examples of the greater function, sorted by popularity by default.
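Before the examples, here is a minimal sketch of what math_ops.greater computes: an element-wise x > y comparison that returns a boolean tensor. (The values are hypothetical, and a TF 1.x-style graph/session environment is assumed, matching the examples below.)

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([1, 5, 3])
y = tf.constant([2, 2, 3])
result = math_ops.greater(x, y)  # element-wise x > y

with tf.Session() as sess:
  print(sess.run(result))  # [False  True False]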
Example 1: mode
def mode(self, name="mode"):
  """Mode of the distribution.

  Note that the mode for the Beta distribution is only defined
  when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
  and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
  will be raised rather than returning `NaN`.

  Args:
    name: The name for this op.

  Returns:
    Mode of the Beta distribution.
  """
  with ops.name_scope(self.name):
    with ops.op_scope([self._a, self._b, self._a_b_sum], name):
      a = self._a
      b = self._b
      a_b_sum = self._a_b_sum
      one = constant_op.constant(1, self.dtype)
      mode = (a - 1) / (a_b_sum - 2)
      if self.allow_nan_stats:
        return math_ops.select(
            math_ops.logical_and(
                math_ops.greater(a, 1), math_ops.greater(b, 1)),
            mode,
            (constant_op.constant(float("NaN"), dtype=self.dtype) *
             array_ops.ones_like(a_b_sum, dtype=self.dtype)))
      else:
        return control_flow_ops.with_dependencies([
            check_ops.assert_less(one, a),
            check_ops.assert_less(one, b)], mode)
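A quick numeric check of the mode formula used above, with hypothetical parameter values:

# For a Beta distribution with a = 2, b = 3:
a, b = 2.0, 3.0
mode = (a - 1) / (a + b - 2)  # 1/3; defined only because a > 1 and b > 1
# For a <= 1 or b <= 1 the math_ops.select branch returns NaN instead.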
Example 2: _variance
def _variance(self):
  # We need to put the tf.where inside the outer tf.where to ensure we never
  # hit a NaN in the gradient.
  denom = array_ops.where(math_ops.greater(self.df, 2.),
                          self.df - 2.,
                          array_ops.ones_like(self.df))
  # Abs(scale) superfluous.
  var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
         math_ops.square(self.scale) * self.df / denom)
  # When 1 < df <= 2, variance is infinite.
  inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
  result_where_defined = array_ops.where(
      self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
      var,
      array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
        result_where_defined,
        array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones([], dtype=self.dtype),
                self.df,
                message="variance not defined for components of df <= 1"),
        ],
        result_where_defined)
Example 3: _mode
def _mode(self):
  mode = (self.a - 1.0) / (self.a_b_sum - 2.0)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.logical_and(math_ops.greater(self.a, 1.0),
                             math_ops.greater(self.b, 1.0)),
        mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype),
            self.a,
            message="Mode not defined for components of a <= 1."),
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype),
            self.b,
            message="Mode not defined for components of b <= 1."),
    ], mode)
Example 4: _variance
def _variance(self):
  var = self._ones() * math_ops.square(self.sigma) * self.df / (self.df - 2)
  # When 1 < df <= 2, variance is infinite.
  inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
  result_where_defined = math_ops.select(
      math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.0)),
      var,
      array_ops.fill(self.batch_shape(), inf, name="inf"))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.greater(self.df, self._ones()),
        result_where_defined,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype),
            self.df,
            message="variance not defined for components of df <= 1"),
    ], result_where_defined)
Example 5: compute_lr
def compute_lr(self, grad, var):
  scaled_lr = self._learning_rate
  if self._skip_list is None or not any(v in var.name
                                        for v in self._skip_list):
    w_norm = linalg_ops.norm(var, ord=2)
    g_norm = linalg_ops.norm(grad, ord=2)
    trust_ratio = array_ops.where(
        math_ops.greater(w_norm, 0),
        array_ops.where(
            math_ops.greater(g_norm, 0),
            (self._eeta * w_norm /
             (g_norm + self._weight_decay * w_norm + self._epsilon)), 1.0),
        1.0)
    scaled_lr = self._learning_rate * trust_ratio
  return scaled_lr
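This is a LARS-style trust ratio: the per-layer learning rate is scaled by eeta * ||w|| / (||g|| + weight_decay * ||w|| + epsilon), and the nested where falls back to 1.0 whenever either norm is zero. A plain-NumPy check of the formula, using hypothetical norms and hyperparameters:

import numpy as np

w_norm, g_norm = 4.0, 2.0                        # hypothetical layer norms
eeta, weight_decay, epsilon = 0.001, 1e-4, 1e-8  # hypothetical hyperparameters
trust_ratio = eeta * w_norm / (g_norm + weight_decay * w_norm + epsilon)
scaled_lr = 0.1 * trust_ratio                    # base lr 0.1 -> ~2e-4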
Example 6: _safe_div
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if isinstance(denominator, float):
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  if context.in_eager_mode() and denominator._rank() == 0:  # pylint: disable=protected-access
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
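A usage sketch of the helper above, with hypothetical tensors (assumes a TF 1.x session and that the snippet's internal TF imports are in scope):

import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])
out = _safe_div(num, den)  # zero wherever the denominator is zero

with tf.Session() as sess:
  print(sess.run(out))  # [0.5   0.    0.75]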
Example 7: filter_functions
def filter_functions():
  take_all = lambda x: constant_op.constant(True)
  is_zero = lambda x: math_ops.equal(x, 0)
  greater = lambda x: math_ops.greater(x + 5, 0)

  tests = []
  filters = [take_all, is_zero, greater]
  identity = lambda x: x
  for x, predicate_1 in enumerate(filters):
    for y, predicate_2 in enumerate(filters):
      tests.append(("Mixed{}{}".format(x, y), identity,
                    [predicate_1, predicate_2]))
      for z, predicate_3 in enumerate(filters):
        tests.append(("Mixed{}{}{}".format(x, y, z), identity,
                      [predicate_1, predicate_2, predicate_3]))

  take_all_multiple = lambda x, y: constant_op.constant(True)
  # Multi output
  tests.append(("Multi1", lambda x: (x, x),
                [take_all_multiple, take_all_multiple]))
  tests.append(("Multi2", lambda x: (x, 2), [
      take_all_multiple,
      lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
  ]))
  return tuple(tests)
Example 8: map_and_filter_functions
def map_and_filter_functions():
  identity = lambda x: x
  increment = lambda x: x + 1
  minus_five = lambda x: x - 5

  def increment_and_square(x):
    y = x + 1
    return y * y

  take_all = lambda x: constant_op.constant(True)
  is_zero = lambda x: math_ops.equal(x, 0)
  is_odd = lambda x: math_ops.equal(x % 2, 0)
  greater = lambda x: math_ops.greater(x + 5, 0)

  functions = [identity, increment, minus_five, increment_and_square]
  filters = [take_all, is_zero, is_odd, greater]
  tests = []
  for x, fun in enumerate(functions):
    for y, predicate in enumerate(filters):
      tests.append(("Mixed{}{}".format(x, y), fun, predicate))

  # Multi output
  tests.append(("Multi1", lambda x: (x, x),
                lambda x, y: constant_op.constant(True)))
  tests.append(
      ("Multi2", lambda x: (x, 2),
       lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
  return tuple(tests)
Example 9: _prune_invalid_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  if sparse_weights is not None:
    is_weights_valid = math_ops.greater(sparse_weights.values, 0)
    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
  return sparse_ids, sparse_weights
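A usage sketch with hypothetical SparseTensors: sparse_retain drops the entries whose weight fails the math_ops.greater check from both inputs in lockstep.

import tensorflow as tf

ids = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[3, 7],
                      dense_shape=[1, 2])
weights = tf.SparseTensor(indices=[[0, 0], [0, 1]], values=[0.5, -1.0],
                          dense_shape=[1, 2])
pruned_ids, pruned_weights = _prune_invalid_weights(ids, weights)
# The id/weight pair with weight -1.0 is removed from both tensors.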
Example 10: gcd
def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a`
    and `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)

    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)

    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
    body = lambda a, b: [b, math_ops.mod(a, b)]
    a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
    return a
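The while_loop keeps replacing (a, b) with (b, a mod b) until math_ops.greater reports b has reached zero, at which point the first loop variable holds the GCD. A usage sketch (TF 1.x session assumed, with the snippet's internal TF imports in scope):

import tensorflow as tf

with tf.Session() as sess:
  print(sess.run(gcd(12, 18)))  # Euclid's algorithm: gcd(12, 18) == 6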
Example 11: average_impurity
def average_impurity(self):
  """Constructs a TF graph for evaluating the average leaf impurity of a tree.

  If in regression mode, this is the leaf variance. If in classification mode,
  this is the gini impurity.

  Returns:
    The last op in the graph.
  """
  children = array_ops.squeeze(array_ops.slice(
      self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
  is_leaf = math_ops.equal(constants.LEAF_NODE, children)
  leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                               squeeze_dims=[1]))
  counts = array_ops.gather(self.variables.node_sums, leaves)
  gini = self._weighted_gini(counts)

  # Guard against step 1, when there often are no leaves yet.
  def impurity():
    return gini

  # Since average impurity can be used for loss, when there's no data just
  # return a big number so that loss always decreases.
  def big():
    return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

  return control_flow_ops.cond(math_ops.greater(
      array_ops.shape(leaves)[0], 0), impurity, big)
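The cond guard at the end is a general pattern: compute the statistic only when data exists, otherwise return a sentinel so a loss built on it still behaves. In miniature, with hypothetical tensors (TF 1.x):

import tensorflow as tf

leaves = tf.constant([], dtype=tf.int32)        # no leaves yet (step 1)
result = tf.cond(tf.greater(tf.shape(leaves)[0], 0),
                 lambda: tf.constant(0.5),      # stand-in for the real impurity
                 lambda: tf.constant(1e7))      # large sentinel value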
Example 12: mode
def mode(self, name="mode"):
  """Mode of the distribution.

  Note that the mode for the Dirichlet distribution is only defined
  when `alpha > 1`. This returns the mode when `alpha > 1`,
  and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
  will be raised rather than returning `NaN`.

  Args:
    name: The name for this op.

  Returns:
    Mode of the Dirichlet distribution.
  """
  with ops.name_scope(self.name):
    with ops.op_scope([self._alpha, self._alpha_0], name):
      one = constant_op.constant(1, self.dtype)
      mode = (self._alpha - 1) / (
          array_ops.expand_dims(self._alpha_0, -1) - math_ops.cast(
              self.event_shape()[0], self.dtype))
      if self.allow_nan_stats:
        return math_ops.select(
            math_ops.greater(self._alpha, 1),
            mode,
            (constant_op.constant(float("NaN"), dtype=self.dtype) *
             array_ops.ones_like(self._alpha, dtype=self.dtype)))
      else:
        return control_flow_ops.with_dependencies([
            check_ops.assert_less(
                one, self._alpha,
                message="mode not defined for components of alpha <= 1")
        ], mode)
Example 13: _safe_div
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if compat.forward_compatible(2018, 11, 1):
    return math_ops.div_no_nan(numerator, denominator, name=name)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator,
                   array_ops.where(
                       math_ops.equal(denominator, 0),
                       array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
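The forward-compatibility branch above points at the public op that provides the same behavior; a minimal sketch of the modern equivalent:

import tensorflow as tf

num = tf.constant([1.0, 2.0])
den = tf.constant([0.0, 4.0])
safe = tf.math.div_no_nan(num, den)  # [0.0, 0.5]; zero wherever den == 0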
Example 14: _get_stratified_batch_from_tensors
def _get_stratified_batch_from_tensors(val, label, reject_probs, batch_size,
                                       queue_threads=3):
  """Reject examples one-at-a-time based on class."""
  # Make rejection probabilities into a tensor so they can be dynamically
  # accessed by tensors.
  reject_probs = constant_op.constant(
      reject_probs, dtype=dtypes.float32, name='rejection_probabilities')

  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  val_shape = val.get_shape()
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=[val_shape, label_shape],
                                    dtypes=[val.dtype, label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  eq_tf = array_ops.reshape(
      math_ops.greater(random_ops.random_uniform([1]),
                       array_ops.slice(reject_probs, [label], [1])),
      [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue([val, label]),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))
  return final_q.dequeue_many(batch_size)
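The conditional enqueue implements per-class rejection sampling: an example with label c survives only when a uniform draw exceeds reject_probs[c], i.e. with probability 1 - reject_probs[c]. A quick numeric illustration with hypothetical probabilities:

import numpy as np

reject_probs = np.array([0.0, 0.8])  # hypothetical per-class probabilities
keep_probs = 1.0 - reject_probs      # class 0 always kept, class 1 ~20%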
Example 15: testLargeCase
def testLargeCase(self):
  shape = [32, 512, 256, 1]
  predictions = random_ops.random_uniform(
      shape, 0.0, 1.0, dtype=dtypes_lib.float32)
  labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)
  result, update_op = metric_ops.precision_recall_at_equal_thresholds(
      labels=labels, predictions=predictions, num_thresholds=201)

  # Run many updates, enough to cause highly inaccurate values if the
  # code used float32 for accumulation.
  num_updates = 71

  with self.test_session() as sess:
    sess.run(variables.local_variables_initializer())
    for _ in xrange(num_updates):
      sess.run(update_op)
    prdata = sess.run(result)

    # Since we use random values, we won't know the tp/fp/tn/fn values, but
    # tp and fp at threshold 0 should be the total number of positive and
    # negative labels, hence their sum should be total number of pixels.
    expected_value = 1.0 * np.product(shape) * num_updates
    got_value = prdata.tp[0] + prdata.fp[0]
    # They should be at least within 1.
    self.assertNear(got_value, expected_value, 1.0)
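The arithmetic behind the assertion: each update sees 32 * 512 * 256 * 1 = 4,194,304 pixels, and at threshold 0 every pixel is counted as either a true or a false positive, so after 71 updates tp[0] + fp[0] should equal 4,194,304 * 71 = 297,795,584, a total large enough to expose float32 accumulation error if the metric did not accumulate in higher precision.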