This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.greater_equal. If you have been wondering what greater_equal does and how to use it, the curated code examples below should help.
The following presents 15 code examples of the greater_equal function, ordered by popularity by default.
Example 1: _lower_triangular_mask
def _lower_triangular_mask(shape):
  """Creates a lower-triangular boolean mask over the last 2 dimensions."""
  row_index = math_ops.cumsum(
      array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-2)
  col_index = math_ops.cumsum(
      array_ops.ones(shape=shape, dtype=dtypes.int32), axis=-1)
  return math_ops.greater_equal(row_index, col_index)
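To see what this produces, here is a minimal NumPy sketch of the same logic for a 3x3 shape (NumPy is used only to make the result easy to print; it is not part of the original snippet). The cumsum over rows gives each element its 1-based row index, the cumsum over columns gives its column index, and greater_equal keeps positions on or below the diagonal.

import numpy as np

shape = (3, 3)
row_index = np.cumsum(np.ones(shape, dtype=np.int32), axis=-2)  # i + 1 at (i, j)
col_index = np.cumsum(np.ones(shape, dtype=np.int32), axis=-1)  # j + 1 at (i, j)
print(row_index >= col_index)
# [[ True False False]
#  [ True  True False]
#  [ True  True  True]]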
Example 2: _benchmarkMapAndFilter
def _benchmarkMapAndFilter(self, chain_length, optimize_dataset):
  with ops.Graph().as_default():
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
    for _ in range(chain_length):
      dataset = dataset.map(lambda x: x + 5).filter(
          lambda x: math_ops.greater_equal(x - 5, 0))
    if optimize_dataset:
      dataset = dataset.apply(
          optimization.optimize(["map_and_filter_fusion"]))

    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    with session.Session() as sess:
      for _ in range(10):
        sess.run(next_element.op)
      deltas = []
      for _ in range(100):
        start = time.time()
        for _ in range(100):
          sess.run(next_element.op)
        end = time.time()
        deltas.append(end - start)

      median_wall_time = np.median(deltas) / 100
      opt_mark = "opt" if optimize_dataset else "no-opt"
      print("Map and filter dataset {} chain length: {} Median wall time: {}".
            format(opt_mark, chain_length, median_wall_time))
      self.report_benchmark(
          iters=1000,
          wall_time=median_wall_time,
          name="benchmark_map_and_filter_dataset_chain_latency_{}_{}".format(
              opt_mark, chain_length))
Example 3: _filter_input
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
                  vocab_subsampling, corpus_size, seed):
  """Filters input tensor based on vocab freq, threshold, and subsampling."""
  if vocab_freq_table is None:
    return input_tensor

  if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
    raise ValueError(
        "vocab_freq_table must be a subclass of "
        "InitializableLookupTableBase (such as HashTable) instead of type "
        "{}.".format(type(vocab_freq_table)))

  with ops.name_scope(
      "filter_vocab", values=[vocab_freq_table, input_tensor,
                              vocab_min_count]):
    freq = vocab_freq_table.lookup(input_tensor)
    # Filters out elements in input_tensor that are not found in
    # vocab_freq_table (table returns a default value of -1 specified above
    # when an element is not found).
    mask = math_ops.not_equal(freq, vocab_freq_table.default_value)

    # Filters out elements whose vocab frequencies are less than the
    # threshold.
    if vocab_min_count is not None:
      cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
      mask = math_ops.logical_and(mask,
                                  math_ops.greater_equal(freq, cast_threshold))

    input_tensor = array_ops.boolean_mask(input_tensor, mask)
    freq = array_ops.boolean_mask(freq, mask)

  if not vocab_subsampling:
    return input_tensor

  if vocab_subsampling < 0 or vocab_subsampling > 1:
    raise ValueError(
        "Invalid vocab_subsampling={} - it should be within range [0, 1].".
        format(vocab_subsampling))

  # Subsamples the input tokens based on vocabulary frequency and the
  # vocab_subsampling threshold (i.e. randomly discard commonly appearing
  # tokens).
  with ops.name_scope(
      "subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
    corpus_size = math_ops.cast(corpus_size, dtypes.float64)
    freq = math_ops.cast(freq, dtypes.float64)
    vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)

    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
    keep_prob = ((math_ops.sqrt(freq /
                                (vocab_subsampling * corpus_size)) + 1.0) *
                 (vocab_subsampling * corpus_size / freq))
    random_prob = random_ops.random_uniform(
        array_ops.shape(freq),
        minval=0,
        maxval=1,
        dtype=dtypes.float64,
        seed=seed)

    mask = math_ops.less_equal(random_prob, keep_prob)
    return array_ops.boolean_mask(input_tensor, mask)
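The keep probability above is plain arithmetic on scalar frequencies, so a short Python sketch makes the behaviour concrete (the corpus numbers below are made-up illustrative values, not from the original code):

import math

corpus_size = 1_000_000     # N: total token count of the corpus
vocab_subsampling = 1e-3    # t: the subsampling threshold
freq = 50_000               # f: corpus frequency of one very common token

t_n = vocab_subsampling * corpus_size
keep_prob = (math.sqrt(freq / t_n) + 1.0) * (t_n / freq)
print(round(keep_prob, 3))  # 0.161: this common token is kept ~16% of the time
# Tokens with freq <= t_n get keep_prob >= 1 and are therefore always kept.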
Example 4: _prune_invalid_ids
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    is_id_valid = math_ops.logical_and(
        is_id_valid, math_ops.greater(sparse_weights.values, 0))
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
  if sparse_weights is not None:
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
  return sparse_ids, sparse_weights
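A minimal usage sketch, assuming TF 1.x graph mode and `from tensorflow.python.framework import sparse_tensor` alongside the imports the snippet already uses; the tensor values are invented for illustration:

# Hypothetical input: one id (-1) is invalid and should be pruned.
sparse_ids = sparse_tensor.SparseTensor(
    indices=[[0, 0], [0, 1], [1, 0]], values=[-1, 3, 7], dense_shape=[2, 2])
pruned_ids, _ = _prune_invalid_ids(sparse_ids, None)
# Evaluated in a session, pruned_ids.values yields [3, 7].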
Example 5: _accuracy_at_threshold
def _accuracy_at_threshold(labels, predictions, weights, threshold, name=None):
  with ops.name_scope(
      name, 'accuracy_at_%s' % threshold,
      (predictions, labels, weights, threshold)) as scope:
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metrics_lib.accuracy(
        labels=labels, predictions=threshold_predictions, weights=weights,
        name=scope)
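The thresholding step is the core greater_equal usage here: scores at or above the threshold become 1.0, the rest become 0.0, and those binary predictions feed the accuracy metric. A NumPy analogue with illustrative values:

import numpy as np

predictions = np.array([0.20, 0.80, 0.55])
threshold = 0.5
threshold_predictions = (predictions >= threshold).astype(np.float32)
print(threshold_predictions)  # [0. 1. 1.] -- compared against labels for accuracy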
Example 6: cosine_decay_fn
def cosine_decay_fn(global_step):
  if global_step is None:
    raise ValueError("global_step is required for cosine_decay.")
  global_step = math_ops.minimum(global_step, decay_steps)
  completed_fraction = math_ops.to_float(global_step) / math_ops.to_float(
      decay_steps)
  fraction = 2.0 * num_periods * completed_fraction
  decayed = 0.5 * (
      1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
  if zero_after is not None:
    decayed = array_ops.where(
        math_ops.greater_equal(fraction, 2 * zero_after), 0.0, decayed)
  return decayed
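Since the graph only wraps ordinary math, the schedule can be previewed in plain Python; decay_steps and num_periods are illustrative values here (the enclosing closure supplies them in the original):

import math

def cosine_decay(step, decay_steps=1000, num_periods=0.5):
    step = min(step, decay_steps)
    fraction = 2.0 * num_periods * (step / decay_steps)
    return 0.5 * (1.0 + math.cos(math.pi * fraction))

print([round(cosine_decay(s), 3) for s in (0, 250, 500, 750, 1000)])
# [1.0, 0.854, 0.5, 0.146, 0.0] -- half a cosine period, decaying from 1 to 0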
Example 7: assert_rank_at_least
def assert_rank_at_least(x, rank, data=None, summarize=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Example of adding dependency to the tensor being checked:

  ```python
  x = tf.with_dependencies([tf.assert_rank_at_least(x, 2)], x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.op_scope([x], name, "assert_rank_at_least"):
    x = ops.convert_to_tensor(x, name="x")
    rank = ops.convert_to_tensor(rank, name="rank")

    # Attempt to statically determine the rank.
    x_rank_static = x.get_shape().ndims
    rank_static = tensor_util.constant_value(rank)
    if x_rank_static is not None and rank_static is not None:
      if x_rank_static < rank_static:
        raise ValueError(
            "Tensor %s must have rank at least %d. Received rank %d, shape %s"
            % (x.name, rank_static, x_rank_static, x.get_shape()))
      return control_flow_ops.no_op(name="static_checks_determined_all_ok")

    if data is None:
      data = [
          "Tensor %s must have rank at least" % x.name, rank,
          "Received shape: ", array_ops.shape(x)
      ]
    condition = math_ops.greater_equal(array_ops.rank(x), rank)
    return logging_ops.Assert(condition, data, summarize=summarize)
Example 8: maybe_update_masks
def maybe_update_masks():
  with ops.name_scope(self._spec.name):
    is_step_within_pruning_range = math_ops.logical_and(
        math_ops.greater_equal(self._global_step,
                               self._spec.begin_pruning_step),
        # If end_pruning_step is negative, keep pruning forever!
        math_ops.logical_or(
            math_ops.less_equal(self._global_step,
                                self._spec.end_pruning_step),
            math_ops.less(self._spec.end_pruning_step, 0)))
    is_pruning_step = math_ops.less_equal(
        math_ops.add(self._last_update_step, self._spec.pruning_frequency),
        self._global_step)
    return math_ops.logical_and(is_step_within_pruning_range,
                                is_pruning_step)
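The predicate can be restated in plain Python to make the two gating conditions obvious; this is a sketch, not part of the original class:

def should_update_masks(step, begin_step, end_step, last_update_step, frequency):
    # A negative end_step means "keep pruning forever".
    within_range = step >= begin_step and (step <= end_step or end_step < 0)
    # Only prune once every `frequency` steps.
    is_pruning_step = last_update_step + frequency <= step
    return within_range and is_pruning_step

print(should_update_masks(100, begin_step=0, end_step=-1,
                          last_update_step=90, frequency=10))  # True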
Example 9: restart_decay_fn
def restart_decay_fn(global_step):
  if global_step is None:
    raise ValueError("global_step is required for cosine_decay.")
  global_step = math_ops.minimum(global_step, decay_steps)
  num = math_ops.mod(num_periods * math_ops.to_float(global_step),
                     decay_steps)
  fraction = num / math_ops.to_float(decay_steps)
  decayed = 0.5 * (
      1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
  if zero_after is not None:
    tmp = math_ops.to_float(
        num_periods * global_step) / math_ops.to_float(decay_steps)
    decayed = array_ops.where(
        math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
  return decayed
Example 10: _make_logistic_eval_metric_ops
def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
  """Returns a dictionary of evaluation metric ops for logistic regression.

  Args:
    labels: The labels `Tensor`, or a dict with only one `Tensor` keyed by
      name.
    predictions: The predictions `Tensor`.
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics.

  Returns:
    A dict of metric results keyed by name.
  """
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor = list(labels.values())[0]

  metrics = {}
  metrics[metric_key.MetricKey.PREDICTION_MEAN] = metrics_lib.streaming_mean(
      predictions)
  metrics[metric_key.MetricKey.LABEL_MEAN] = metrics_lib.streaming_mean(
      labels_tensor)
  # Also include the streaming mean of the label as an accuracy baseline, as
  # a reminder to users.
  metrics[metric_key.MetricKey.ACCURACY_BASELINE] = metrics_lib.streaming_mean(
      labels_tensor)
  metrics[metric_key.MetricKey.AUC] = metrics_lib.streaming_auc(
      labels=labels_tensor, predictions=predictions)

  for threshold in thresholds:
    predictions_at_threshold = math_ops.cast(
        math_ops.greater_equal(predictions, threshold),
        dtypes.float32,
        name='predictions_at_threshold_%f' % threshold)

    metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
        metrics_lib.streaming_accuracy(labels=labels_tensor,
                                       predictions=predictions_at_threshold))
    # Precision for positive examples.
    metrics[metric_key.MetricKey.PRECISION_MEAN % threshold] = (
        metrics_lib.streaming_precision(labels=labels_tensor,
                                        predictions=predictions_at_threshold))
    # Recall for positive examples.
    metrics[metric_key.MetricKey.RECALL_MEAN % threshold] = (
        metrics_lib.streaming_recall(labels=labels_tensor,
                                     predictions=predictions_at_threshold))

  return metrics
Example 11: grow_tree
def grow_tree(self, stats_summaries_list, feature_ids_list,
              last_layer_nodes_range):
  # For the not-in-memory case, we need to accumulate enough batches first
  # before proceeding with building a tree layer.
  max_splits = _get_max_splits(self._tree_hparams)

  # Prepare accumulators.
  accumulators = []
  dependencies = []
  for i, feature_ids in enumerate(feature_ids_list):
    stats_summaries = stats_summaries_list[i]
    accumulator = data_flow_ops.ConditionalAccumulator(
        dtype=dtypes.float32,
        # The stats consist of grads and hessians (the last dimension).
        shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
        shared_name='numeric_stats_summary_accumulator_' + str(i))
    accumulators.append(accumulator)

    apply_grad = accumulator.apply_grad(
        array_ops.stack(stats_summaries, axis=0), self._stamp_token)
    dependencies.append(apply_grad)

  # Grow the tree once enough batches are accumulated.
  with ops.control_dependencies(dependencies):
    if not self._is_chief:
      return control_flow_ops.no_op()

    min_accumulated = math_ops.reduce_min(
        array_ops.stack([acc.num_accumulated() for acc in accumulators]))

    def grow_tree_from_accumulated_summaries_fn():
      """Updates tree with the best layer from accumulated summaries."""
      # Take out the accumulated summaries from the accumulator and grow.
      stats_summaries_list = [
          array_ops.unstack(accumulator.take_grad(1), axis=0)
          for accumulator in accumulators
      ]
      grow_op = self._grow_tree_from_stats_summaries(
          stats_summaries_list, feature_ids_list, last_layer_nodes_range)
      return grow_op

    grow_model = control_flow_ops.cond(
        math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
        grow_tree_from_accumulated_summaries_fn,
        control_flow_ops.no_op,
        name='wait_until_n_batches_accumulated')

    return grow_model
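The readiness check at the end reduces to "every feature's accumulator has seen at least n_batches_per_layer batches"; a plain-Python sketch with invented counts:

def ready_to_grow(num_accumulated_per_feature, n_batches_per_layer):
    # Mirrors reduce_min(...) >= n_batches_per_layer from the cond above.
    return min(num_accumulated_per_feature) >= n_batches_per_layer

print(ready_to_grow([3, 5, 4], n_batches_per_layer=4))  # False: one accumulator lags
print(ready_to_grow([4, 5, 4], n_batches_per_layer=4))  # True: the layer can be grown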
Example 12: _update_mask
def _update_mask(self, weights, threshold):
  """Updates the mask for a given weight tensor.

  This function first computes the CDF of the weight tensor, and estimates
  the threshold value such that 'desired_sparsity' fraction of weights
  have magnitude less than the threshold.

  Args:
    weights: The weight tensor that needs to be masked.
    threshold: The current threshold value. The function will compute a new
      threshold and return the exponential moving average using the current
      value of threshold.

  Returns:
    new_threshold: The new value of the threshold based on weights, and
      sparsity at the current global_step.
    new_mask: A tensor of the same size and shape as weights containing
      0 or 1 (0 where the corresponding weight magnitude falls below
      the threshold).

  Raises:
    ValueError: if sparsity is not defined.
  """
  if self._sparsity is None:
    raise ValueError('Sparsity variable undefined')

  sparsity = self._get_sparsity(weights.op.name)
  with ops.name_scope(weights.op.name + '_pruning_ops'):
    abs_weights = math_ops.abs(weights)
    k = math_ops.cast(
        math_ops.round(
            math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
            (1 - sparsity)), dtypes.int32)
    # Sort the entire array.
    values, _ = nn_ops.top_k(
        array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
    # Grab the (k-1)-th value.
    current_threshold = array_ops.gather(values, k - 1)
    smoothed_threshold = math_ops.add_n([
        math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
        math_ops.multiply(threshold, self._spec.threshold_decay)
    ])
    new_mask = math_ops.cast(
        math_ops.greater_equal(abs_weights, smoothed_threshold),
        dtypes.float32)
  return smoothed_threshold, new_mask
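The threshold selection is just "keep the k largest magnitudes"; a NumPy sketch with made-up weights shows the arithmetic (ignoring the moving-average smoothing):

import numpy as np

weights = np.array([0.05, -0.9, 0.3, -0.01, 0.6, 0.2])
sparsity = 0.5                                # prune half of the weights
abs_w = np.abs(weights)
k = int(round(abs_w.size * (1 - sparsity)))   # number of weights to keep
values = np.sort(abs_w)[::-1]                 # descending, like top_k
threshold = values[k - 1]                     # the k-th largest magnitude
mask = (abs_w >= threshold).astype(np.float32)
print(threshold, mask)                        # 0.3 [0. 1. 1. 0. 1. 0.]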
Example 13: assert_greater_equal
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  """Assert the condition `x >= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] >= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_greater_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x >= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name

    if data is None:
      data = [
          message,
          'Condition x >= y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
Example 14: grow_not_in_mem
def grow_not_in_mem():
  """Accumulates the data and grows a layer when ready."""
  accumulators = []
  dependencies = []
  for i, feature_ids in enumerate(feature_ids_list):
    stats_summaries = stats_summaries_list[i]
    accumulator = data_flow_ops.ConditionalAccumulator(
        dtype=dtypes.float32,
        # The stats consist of grads and hessians (the last dimension).
        shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
        shared_name='numeric_stats_summary_accumulator_' + str(i))
    accumulators.append(accumulator)

    apply_grad = accumulator.apply_grad(
        array_ops.stack(stats_summaries, axis=0), stamp_token)
    dependencies.append(apply_grad)

  def grow_tree_from_accumulated_summaries_fn():
    """Updates tree with the best layer from accumulated summaries."""
    # Take out the accumulated summaries from the accumulator and grow.
    stats_summaries_list = [
        array_ops.unstack(accumulator.take_grad(1), axis=0)
        for accumulator in accumulators
    ]
    grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
                                             feature_ids_list)
    return grow_op

  with ops.control_dependencies(dependencies):
    if config.is_chief:
      min_accumulated = math_ops.reduce_min(
          array_ops.stack([acc.num_accumulated() for acc in accumulators]))

      grow_model = control_flow_ops.cond(
          math_ops.greater_equal(min_accumulated, n_batches_per_layer),
          grow_tree_from_accumulated_summaries_fn,
          control_flow_ops.no_op,
          name='wait_until_n_batches_accumulated')
      return grow_model
    else:
      return control_flow_ops.no_op()
Example 15: dropped_inputs
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):  # pylint: disable=missing-docstring
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  alpha_p = -alpha * scale

  kept_idx = math_ops.greater_equal(
      K.random_uniform(noise_shape, seed=seed), rate)
  kept_idx = math_ops.cast(kept_idx, K.floatx())

  # Get affine transformation params.
  a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
  b = -a * alpha_p * rate

  # Apply mask.
  x = inputs * kept_idx + alpha_p * (1 - kept_idx)

  # Do affine transformation.
  return a * x + b
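This is the alpha-dropout variant used with SELU activations: dropped units are set to alpha_p rather than 0, and the affine transform `a * x + b` is chosen so the output keeps zero mean and unit variance. A NumPy simulation (a sketch independent of the original Keras code) confirms this:

import numpy as np

rng = np.random.default_rng(0)
rate = 0.1
alpha_p = -1.6732632423543772 * 1.0507009873554805   # -alpha * scale

x = rng.standard_normal(1_000_000)                   # SELU-normalized inputs
kept_idx = (rng.uniform(size=x.shape) >= rate).astype(x.dtype)
a = ((1 - rate) * (1 + rate * alpha_p ** 2)) ** -0.5
b = -a * alpha_p * rate
y = a * (x * kept_idx + alpha_p * (1 - kept_idx)) + b
print(y.mean(), y.std())                             # both stay close to 0 and 1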