This article collects typical usage examples of the tensorflow.reduce_all method in Python. If you are unsure what tensorflow.reduce_all does, how to call it, or where it is useful, the curated code examples below should help. You can also explore further usage examples from the tensorflow module itself.
The following presents 15 code examples of the tensorflow.reduce_all method, ordered by popularity by default.
Example 1: is_same_dynamic_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # There is a bug in TensorFlow's static shape inference inside nested
    # tf.cond()'s, so instead of comparing the shapes of x and y directly we
    # compare the two concatenations of their shapes.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
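A minimal usage sketch of the function above (not part of the original source), assuming TF 2.x eager execution; the tensors are made up for illustration:
import tensorflow as tf

a = tf.zeros([2, 3])
b = tf.ones([2, 3])
c = tf.ones([2, 3, 1])
print(is_same_dynamic_shape(a, b))  # tf.Tensor(True, shape=(), dtype=bool)
print(is_same_dynamic_shape(a, c))  # tf.Tensor(False, ...): the ranks differ, so the shapes are never compared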
Example 2: chk_pos_in_bounds
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check that the position is in-bounds with respect to the sequence.
    The accepted range for 'pos' is [-n, n - 1], where n is the number
    of tensors in 'input_seq'.

    :param input_seq: input sequence
    :param pos: position of the output tensor
    :return: True if the position is in-bounds or the input length is dynamic.
    """
    seq_length = input_seq.shape[0]
    if seq_length is None:
        return True
    seq_length = tf.cast(seq_length, pos.dtype)
    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length - 1)
    # pos >= -n and pos < n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
Example 3: chk_pos_in_bounds
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check that the position is in-bounds with respect to the sequence.
    The accepted range for 'pos' is [-n, n - 1], where n is the number
    of tensors in 'input_seq'.

    :param input_seq: input sequence
    :param pos: position of the output tensor
    :return: True if the position is in-bounds
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]
    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length - 1)
    # pos >= -n and pos < n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
Example 4: chk_pos_in_bounds
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def chk_pos_in_bounds(cls, input_seq, pos):
    """
    Check that the position is in-bounds with respect to the sequence.
    The accepted range for 'pos' is [-n, n], where n is the number of
    tensors in 'input_seq'.

    :param input_seq: input sequence
    :param pos: position to insert the tensor
    :return: True if the position is in-bounds.
    """
    seq_length = tf.shape(input_seq.to_sparse(), out_type=pos.dtype)[0]
    cond1 = tf.greater_equal(pos, tf.negative(seq_length))
    cond2 = tf.less_equal(pos, seq_length)
    # pos >= -n and pos <= n
    return tf.reduce_all(tf.logical_and(cond1, cond2))
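Examples 2-4 differ only in the upper bound of the accepted range. A standalone sketch of the check itself, with a hypothetical sequence length and position:
import tensorflow as tf

n = tf.constant(4)    # hypothetical number of tensors in the sequence
pos = tf.constant(4)  # hypothetical position
read_ok = tf.reduce_all(tf.logical_and(tf.greater_equal(pos, -n), tf.less_equal(pos, n - 1)))
insert_ok = tf.reduce_all(tf.logical_and(tf.greater_equal(pos, -n), tf.less_equal(pos, n)))
print(read_ok, insert_ok)  # False True: 4 is a valid insert position but not a valid read position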
Example 5: assert_binary
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def assert_binary(tensor, name=None):
    """Asserts that all the values in the tensor are zeros or ones.

    Args:
      tensor: A tensor of shape `[A1, ..., An]` containing the values we want to
        check.
      name: A name for this op. Defaults to "assert_binary".

    Returns:
      The input tensor, with dependence on the assertion operator in the graph.

    Raises:
      tf.errors.InvalidArgumentError: If any of the values in the tensor is not
        zero or one.
    """
    if not FLAGS[tfg_flags.TFG_ADD_ASSERTS_TO_GRAPH].value:
        return tensor
    with tf.compat.v1.name_scope(name, 'assert_binary', [tensor]):
        tensor = tf.convert_to_tensor(value=tensor)
        condition = tf.reduce_all(
            input_tensor=tf.logical_or(tf.equal(tensor, 0), tf.equal(tensor, 1)))
        with tf.control_dependencies([tf.Assert(condition, data=[tensor])]):
            return tf.identity(tensor)
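The core of the check, sketched without the tensorflow_graphics FLAGS machinery: tf.reduce_all folds the per-element zero-or-one test into the single boolean that tf.Assert expects.
import tensorflow as tf

t = tf.constant([[0, 1], [1, 0]])
is_binary = tf.reduce_all(tf.logical_or(tf.equal(t, 0), tf.equal(t, 1)))
with tf.control_dependencies([tf.Assert(is_binary, data=[t])]):
    t = tf.identity(t)  # passes; a value such as 2 anywhere in t would trigger the assertion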
Example 6: next_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
        # A sequence is finished when the predicted stop probability is > 0.5.
        finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
        # Since we are predicting r frames at each step, two modes are possible:
        #   - Stop when the model outputs p > 0.5 for any of the r frames (recommended).
        #   - Stop when the model outputs p > 0.5 for all r frames (safer).
        # Note:
        #   With enough training steps the model should learn when to stop, and
        #   stop_at_any = True is recommended. If the model has not yet learned to
        #   stop correctly (it stops too soon), the safer option still gives a
        #   correct synthesis.
        if hparams.stop_at_any:
            finished = tf.reduce_any(finished)  # Recommended
        else:
            finished = tf.reduce_all(finished)  # Safer option
        # Feed the last output frame as the next input. outputs is [N, output_dim * r].
        next_inputs = outputs[:, -self._output_dim:]
        next_state = state
        return (finished, next_inputs, next_state)
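A toy illustration of the two stopping modes (the stop-token values below are made up, with r = 3 frames):
import tensorflow as tf

stop_token_prediction = tf.constant([0.1, 0.7, 0.2])
finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
print(tf.reduce_any(finished))  # True: stop_at_any mode stops as soon as one frame crosses 0.5
print(tf.reduce_all(finished))  # False: the safer mode keeps decoding until every frame does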
Example 7: keep_for_training
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def keep_for_training(self, features, maximum_length=None):
    if not isinstance(maximum_length, list):
        maximum_length = [maximum_length]
    # Unset maximum lengths are set to None (i.e. no constraint).
    maximum_length += [None] * (len(self.inputters) - len(maximum_length))
    constraints = []
    for i, inputter in enumerate(self.inputters):
        keep = inputter.keep_for_training(
            self._index_features(features, i), maximum_length=maximum_length[i])
        if isinstance(keep, bool):
            if not keep:
                return False
            continue
        constraints.append(keep)
    if not constraints:
        return True
    return tf.reduce_all(constraints)
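A sketch of the final reduction with illustrative values: tf.reduce_all accepts a Python list of scalar boolean tensors and returns True only if every per-inputter constraint holds.
import tensorflow as tf

constraints = [tf.constant(True), tf.constant(True), tf.constant(False)]
print(tf.reduce_all(constraints))  # tf.Tensor(False, shape=(), dtype=bool)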
Example 8: next_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def next_inputs(self, time, outputs, state, sample_ids):
    (finished, base_next_inputs, state) = super().next_inputs(
        time=time, outputs=outputs, state=state, sample_ids=sample_ids
    )

    def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
        where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)
        sample_ids_sampling = tf.gather_nd(sample_ids, where_sampling)
        inputs_not_sampling = tf.gather_nd(base_next_inputs, where_not_sampling)
        sampled_next_inputs = self.embedding_fn(sample_ids_sampling)
        base_shape = tf.shape(base_next_inputs)
        return tf.scatter_nd(
            indices=where_sampling, updates=sampled_next_inputs, shape=base_shape
        ) + tf.scatter_nd(
            indices=where_not_sampling,
            updates=inputs_not_sampling,
            shape=base_shape,
        )

    all_finished = tf.reduce_all(finished)
    next_inputs = tf.cond(all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)
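The gating pattern above, in isolation: tf.reduce_all collapses the per-sequence finished flags, so scheduled sampling is skipped only once every sequence in the (hypothetical) batch has finished.
import tensorflow as tf

finished = tf.constant([True, False, True])
all_finished = tf.reduce_all(finished)
next_inputs = tf.cond(all_finished, lambda: tf.zeros([3]), lambda: tf.ones([3]))
print(all_finished)  # False: keep sampling while any sequence is still running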
Example 9: random_crop_image
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def random_crop_image(img, size, offset=None):
    # Adapted from the code of tf.random_crop.
    shape = tf.shape(img)
    # The assertion is removed for now since it makes queue filling slow for some reason:
    # check = tf.Assert(
    #     tf.reduce_all(shape[:2] >= size),
    #     ["Need value.shape >= size, got ", shape, size])
    # with tf.control_dependencies([check]):
    #     img = tf.identity(img)
    limit = shape[:2] - size + 1
    dtype = tf.int32
    if offset is None:
        offset = tf.random_uniform(shape=(2,), dtype=dtype, maxval=dtype.max, seed=None) % limit
    offset = tf.stack([offset[0], offset[1], 0])
    size0 = size[0] if isinstance(size[0], int) else None
    size1 = size[1] if isinstance(size[1], int) else None
    size_im = tf.stack([size[0], size[1], img.get_shape().as_list()[2]])
    img_cropped = tf.slice(img, offset, size_im)
    out_shape_img = [size0, size1, img.get_shape()[2]]
    img_cropped.set_shape(out_shape_img)
    return img_cropped, offset
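The disabled assertion above, sketched standalone: tf.reduce_all collapses the per-dimension comparison into one boolean before it is handed to tf.Assert (the shapes below are hypothetical).
import tensorflow as tf

shape = tf.constant([480, 640, 3])
size = tf.constant([128, 128])
print(tf.reduce_all(shape[:2] >= size))  # True: both spatial dimensions are large enough for the crop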
Example 10: zero_all_if_any_non_finite
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
# This example also requires: import functools
def zero_all_if_any_non_finite(structure):
    """Zeroes out all entries in the input if any of them are not finite.

    Args:
      structure: A structure supported by tf.nest.

    Returns:
      A tuple (input, 0) if all entries are finite or the structure is empty, or
      a tuple (zeros, 1) if any non-finite entries were found.
    """
    flat = tf.nest.flatten(structure)
    if not flat:
        return (structure, tf.constant(0))
    flat_bools = [tf.reduce_all(tf.math.is_finite(t)) for t in flat]
    all_finite = functools.reduce(tf.logical_and, flat_bools)
    if all_finite:
        return (structure, tf.constant(0))
    else:
        return (tf.nest.map_structure(tf.zeros_like, structure), tf.constant(1))
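A minimal usage sketch (assumed eager execution; the structures are made up): a single NaN anywhere zeroes out everything and flips the flag to 1.
import functools
import tensorflow as tf

good = {'w': tf.constant([1.0, 2.0]), 'b': tf.constant(0.5)}
bad = {'w': tf.constant([1.0, float('nan')]), 'b': tf.constant(0.5)}
print(zero_all_if_any_non_finite(good)[1])  # tf.Tensor(0, shape=(), dtype=int32)
print(zero_all_if_any_non_finite(bad)[1])   # tf.Tensor(1, shape=(), dtype=int32)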
Example 11: categorical_accuracy_with_variable_timestep
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def categorical_accuracy_with_variable_timestep(y_true, y_pred):
    # Discarding is not strictly needed if the dummy is an all-zeros array
    # (it is indeed encoded as an all-zeros array by
    # CaptionPreprocessing.preprocess_batch).
    y_true = y_true[:, :-1, :]  # Discard the last timestep/word (dummy)
    y_pred = y_pred[:, :-1, :]  # Discard the last timestep/word (dummy)

    # Flatten the timestep dimension.
    shape = tf.shape(y_true)
    y_true = tf.reshape(y_true, [-1, shape[-1]])
    y_pred = tf.reshape(y_pred, [-1, shape[-1]])

    # Discard rows that are all zeros, as they represent dummy or padding words.
    is_zero_y_true = tf.equal(y_true, 0)
    is_zero_row_y_true = tf.reduce_all(is_zero_y_true, axis=-1)
    y_true = tf.boolean_mask(y_true, ~is_zero_row_y_true)
    y_pred = tf.boolean_mask(y_pred, ~is_zero_row_y_true)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y_true, axis=1),
                                               tf.argmax(y_pred, axis=1)),
                                      dtype=tf.float32))
    return accuracy

# As Keras stores a function's name as its metric's name
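A sketch of the padding mask used above, on toy one-hot data: tf.reduce_all along the last axis flags rows that are entirely zero so they can be dropped before computing accuracy.
import tensorflow as tf

y_true = tf.constant([[0., 1., 0.],
                      [0., 0., 0.],   # padding row
                      [1., 0., 0.]])
is_zero_row = tf.reduce_all(tf.equal(y_true, 0), axis=-1)
print(is_zero_row)                            # [False  True False]
print(tf.boolean_mask(y_true, ~is_zero_row))  # the padding row is removed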
Example 12: convert_from_color_segmentation
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
# This example also requires: import numpy as np
def convert_from_color_segmentation(color_value_dict, arr_3d, tensor_type=False):
    if tensor_type:
        arr_2d = tf.zeros(shape=[tf.shape(arr_3d)[0], tf.shape(arr_3d)[1]], dtype=tf.uint8)
        for c, i in color_value_dict.items():
            color_array = tf.reshape(np.asarray(c, dtype=np.uint8), shape=[1, 1, -1])
            condition = tf.reduce_all(tf.equal(arr_3d, color_array), axis=-1)
            arr_2d = tf.where(condition, tf.cast(tf.fill(tf.shape(arr_2d), i), tf.uint8), arr_2d)
        return arr_2d
    else:
        arr_2d = np.zeros((np.shape(arr_3d)[0], np.shape(arr_3d)[1]), dtype=np.uint8)
        for c, i in color_value_dict.items():
            color_array = np.asarray(c, np.float32).reshape([1, 1, -1])
            m = np.all(arr_3d == color_array, axis=-1)
            arr_2d[m] = i
        return arr_2d
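The per-pixel test above on a tiny made-up image: tf.reduce_all over the channel axis marks exactly the pixels whose RGB value equals the class colour.
import tensorflow as tf

img = tf.constant([[[255, 0, 0], [0, 0, 255]]], dtype=tf.uint8)  # a 1x2 RGB image
red = tf.constant([255, 0, 0], dtype=tf.uint8)
mask = tf.reduce_all(tf.equal(img, red), axis=-1)
print(mask)  # [[ True False]]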
Example 13: next_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
        # A sequence is finished when the predicted stop probability is > 0.5.
        finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
        # Since we are predicting r frames at each step, two modes are possible:
        #   - Stop when the model outputs p > 0.5 for any of the r frames (recommended).
        #   - Stop when the model outputs p > 0.5 for all r frames (safer).
        # Note:
        #   With enough training steps the model should learn when to stop, and
        #   stop_at_any = True is recommended. If the model has not yet learned to
        #   stop correctly (it stops too soon), the safer option still gives a
        #   correct synthesis.
        if self.stop_at_any:
            finished = tf.reduce_any(tf.reduce_all(finished, axis=0))  # Recommended
        else:
            finished = tf.reduce_all(tf.reduce_all(finished, axis=0))  # Safer option
        # Feed the last output frame as the next input. outputs is [N, output_dim * r].
        next_inputs = outputs[:, -self._output_dim:]
        next_state = state
        return (finished, next_inputs, next_state)
Example 14: CheckZeroOneCode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def CheckZeroOneCode(x):
    return tf.reduce_all(tf.equal(x * (x - 1.0), 0))
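An illustrative check (assumed float inputs): x * (x - 1) is zero exactly when x is 0 or 1, and tf.reduce_all requires that to hold for every element.
import tensorflow as tf

print(CheckZeroOneCode(tf.constant([0., 1., 1., 0.])))  # True
print(CheckZeroOneCode(tf.constant([0., 0.5, 1.])))     # False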
Example 15: log_quaternion_loss_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import reduce_all [as alias]
def log_quaternion_loss_batch(predictions, labels, params):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      params: A dictionary of parameters. Expects 'use_logging' and 'batch_size'.

    Returns:
      A Tensor of size [batch_size], denoting the error between the quaternions.
    """
    use_logging = params['use_logging']
    assertions = []
    if use_logging:
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
                ['The l2 norm of each prediction quaternion vector should be 1.']))
        assertions.append(
            tf.Assert(
                tf.reduce_all(
                    tf.less(
                        tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1), 1e-4)),
                ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.control_dependencies(assertions):
        product = tf.multiply(predictions, labels)
    internal_dot_products = tf.reduce_sum(product, [1])
    if use_logging:
        internal_dot_products = tf.Print(
            internal_dot_products,
            [internal_dot_products, tf.shape(internal_dot_products)],
            'internal_dot_products:')
    logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
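The unit-norm assertion above, sketched standalone with hypothetical quaternions: tf.reduce_all turns the per-row tolerance test into the single boolean that tf.Assert needs.
import tensorflow as tf

q = tf.constant([[1.0, 0.0, 0.0, 0.0],
                 [0.0, 0.7071, 0.7071, 0.0]])
is_unit = tf.reduce_all(
    tf.less(tf.abs(tf.reduce_sum(tf.square(q), [1]) - 1.0), 1e-4))
print(is_unit)  # True: both rows have an l2 norm of 1 within the tolerance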