This article collects typical usage examples of the tensorflow.reduce_any method in Python. If you have been wondering how exactly tensorflow.reduce_any is used, or what real-world calls to it look like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.
The following presents 15 code examples of tensorflow.reduce_any, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
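Before the collected examples, here is a minimal standalone sketch (my addition, assuming TensorFlow 2.x eager execution; not taken from any of the projects below) of what tf.reduce_any computes: a logical OR over the chosen axis of a boolean tensor, with keepdims controlling whether the reduced axis is retained.

import tensorflow as tf

x = tf.constant([[True, False, False],
                 [False, False, False]])

print(tf.reduce_any(x))                          # True  -> OR over all elements
print(tf.reduce_any(x, axis=1))                  # [True, False] -> OR over each row
print(tf.reduce_any(x, axis=0, keepdims=True))   # [[True, False, False]], rank preserved

# A common pattern from the examples below: keep a row if any of its
# per-class scores exceeds a threshold.
scores = tf.constant([[0.1, 0.7], [0.05, 0.02]])
mask = tf.reduce_any(scores >= 0.15, axis=-1)    # [True, False]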
Example 1: yolo_nms
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def yolo_nms(outputs, anchors, masks, num_classes, iou_threshold=0.6, score_threshold=0.15):
    boxes, confs, classes = [], [], []
    for o in outputs:
        boxes.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
        confs.append(tf.reshape(o[1], (tf.shape(o[0])[0], -1, tf.shape(o[1])[-1])))
        classes.append(tf.reshape(o[2], (tf.shape(o[0])[0], -1, tf.shape(o[2])[-1])))
    boxes = tf.concat(boxes, axis=1)
    confs = tf.concat(confs, axis=1)
    class_probs = tf.concat(classes, axis=1)
    box_scores = confs * class_probs
    mask = box_scores >= score_threshold
    mask = tf.reduce_any(mask, axis=-1)
    class_boxes = tf.boolean_mask(boxes, mask)
    class_boxes = tf.reshape(class_boxes, (tf.shape(boxes)[0], -1, 4))
    class_box_scores = tf.boolean_mask(box_scores, mask)
    class_box_scores = tf.reshape(class_box_scores, (tf.shape(boxes)[0], -1, num_classes))
    class_boxes, class_box_scores = tf.py_function(func=batched_nms,
                                                   inp=[class_boxes, class_box_scores, num_classes, iou_threshold],
                                                   Tout=[tf.float32, tf.float32])
    classes = tf.argmax(class_box_scores, axis=-1)
    return class_boxes, class_box_scores, classes
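The reduce_any call above is the per-box keep/drop decision: a box survives if any of its class scores clears score_threshold. A small numeric sketch (hypothetical scores, not produced by the YOLO model above) of just that masking step:

import tensorflow as tf

# box_scores: (batch, num_boxes, num_classes) -- here 1 batch, 3 boxes, 2 classes
box_scores = tf.constant([[[0.30, 0.05],
                           [0.01, 0.02],
                           [0.10, 0.60]]])
score_threshold = 0.15

mask = tf.reduce_any(box_scores >= score_threshold, axis=-1)  # shape (1, 3): [True, False, True]
kept = tf.boolean_mask(box_scores, mask)                      # drops the all-low-score box
print(mask.numpy(), kept.shape)                               # [[ True False  True]] (2, 2)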
Example 2: testLoss
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def testLoss(self):
    batch_size = 2
    key_depth = 5
    val_depth = 5
    memory_size = 4
    window_size = 3
    x_depth = 5
    memory = transformer_memory.TransformerMemory(
        batch_size, key_depth, val_depth, memory_size)
    x = tf.random_uniform([batch_size, window_size, x_depth], minval=.0)
    memory_results, _, _, _ = (
        memory.pre_attention(
            tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32),
            x, None, None))
    x = memory.post_attention(memory_results, x)
    with tf.control_dependencies([tf.print("x", x)]):
        is_nan = tf.reduce_any(tf.math.is_nan(x))
    with self.test_session() as session:
        session.run(tf.global_variables_initializer())
        for _ in range(100):
            is_nan_value, _ = session.run([is_nan, x])
            self.assertEqual(is_nan_value, False)
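The NaN check in this test is simply reduce_any over an elementwise tf.math.is_nan mask. A standalone sketch (TF 2.x eager, toy tensor of my own) of the same assertion:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, float('nan')]])
has_nan = tf.reduce_any(tf.math.is_nan(x))
print(has_nan.numpy())  # True: at least one element is NaN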
Example 3: bi_attention_mx
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def bi_attention_mx(config, is_train, p, h, p_mask=None, h_mask=None, scope=None):  # [N, L, 2d]
    with tf.variable_scope(scope or "dense_logit_bi_attention"):
        PL = p.get_shape()[1]
        HL = h.get_shape()[1]
        p_aug = tf.tile(tf.expand_dims(p, 2), [1, 1, config.max_seq_len_word, 1])
        h_aug = tf.tile(tf.expand_dims(h, 1), [1, config.max_seq_len_word, 1, 1])  # [N, PL, HL, 2d]
        if p_mask is None:
            ph_mask = None
        else:
            p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, config.max_seq_len_word, 1]), tf.bool), axis=3)
            h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, config.max_seq_len_word, 1, 1]), tf.bool), axis=3)
            ph_mask = p_mask_aug & h_mask_aug
        ph_mask = None  # note: the snippet unconditionally discards the mask computed above
        h_logits = p_aug * h_aug
        return h_logits
Example 4: self_attention
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def self_attention(config, is_train, p, p_mask=None, scope=None):  # [N, L, 2d]
    with tf.variable_scope(scope or "self_attention"):
        PL = p.get_shape()[1]
        dim = p.get_shape()[-1]
        # HL = tf.shape(h)[1]
        p_aug_1 = tf.tile(tf.expand_dims(p, 2), [1, 1, config.max_seq_len_word, 1])
        p_aug_2 = tf.tile(tf.expand_dims(p, 1), [1, config.max_seq_len_word, 1, 1])  # [N, PL, HL, 2d]
        if p_mask is None:
            self_mask = None  # keep self_mask defined in both branches
        else:
            p_mask_aug_1 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, config.max_seq_len_word, 1]), tf.bool), axis=3)
            p_mask_aug_2 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 1), [1, config.max_seq_len_word, 1, 1]), tf.bool), axis=3)
            self_mask = p_mask_aug_1 & p_mask_aug_2
        h_logits = get_logits([p_aug_1, p_aug_2], None, True, wd=config.wd, mask=self_mask,
                              is_train=is_train, func=config.self_att_logit_func, scope='h_logits')  # [N, PL, HL]
        self_att = softsel(p_aug_2, h_logits)
        return self_att
Example 5: search
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def search(self, initial_ids, initial_cache):
    """Beam search for sequences with highest scores."""
    state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
    finished_state = tf.while_loop(
        cond=self._continue_search, body=self._search_step, loop_vars=[state],
        shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
    finished_state = finished_state[0]
    alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
    alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
    finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
    finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
    finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
    # Account for corner case where there are no finished sequences for a
    # particular batch item. In that case, return alive sequences for that
    # batch item.
    finished_seq = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_seq, alive_seq)
    finished_scores = tf.compat.v1.where(
        tf.reduce_any(input_tensor=finished_flags, axis=1), finished_scores, alive_log_probs)
    return finished_seq, finished_scores
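The two tf.compat.v1.where calls above rely on reduce_any over the beam axis: a batch item falls back to its alive beams only when none of its beams finished. A tiny sketch (made-up flags and scores, not the real beam-search state) of that per-item selection:

import tensorflow as tf

finished_flags = tf.constant([[True, False, False],    # batch item 0: at least one finished beam
                              [False, False, False]])  # batch item 1: nothing finished yet
has_finished = tf.reduce_any(finished_flags, axis=1)   # [True, False]

finished_scores = tf.constant([2.0, 0.0])
alive_log_probs = tf.constant([1.5, -0.7])
# Per batch item: use finished scores if any beam finished, otherwise alive scores.
result = tf.where(has_finished, finished_scores, alive_log_probs)
print(result.numpy())  # [ 2.  -0.7]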
Example 6: next_inputs
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def next_inputs(self, time, outputs, state, sample_ids, stop_token_prediction, name=None):
    '''Stop on EOS. Otherwise, pass the last output as the next input and pass through state.'''
    with tf.name_scope('TacoTestHelper'):
        # A sequence is finished when the output probability is > 0.5
        finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
        # Since we are predicting r frames at each step, two modes are possible:
        #   Stop when the model outputs p > 0.5 for any of the r frames (recommended)
        #   Stop when the model outputs p > 0.5 for all of the r frames (safer)
        # Note:
        #   With enough training steps, the model should learn to predict when to stop
        #   correctly, and using stop_at_any = True is then recommended. If the model has
        #   not learned to stop correctly yet (it stops too soon), the safer option can be
        #   used to obtain a correct synthesis.
        if hparams.stop_at_any:
            finished = tf.reduce_any(finished)  # recommended
        else:
            finished = tf.reduce_all(finished)  # safer option
        # Feed the last output frame as the next input. outputs is [N, output_dim * r]
        next_inputs = outputs[:, -self._output_dim:]
        next_state = state
        return (finished, next_inputs, next_state)
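The stop_at_any switch above is just the choice between reduce_any and reduce_all over the r frames predicted in one decoder step. A short sketch (hypothetical stop-token outputs) showing how the two modes differ:

import tensorflow as tf

# Rounded stop-token predictions for r = 3 frames of one decoder step.
stop_tokens = tf.constant([0.0, 1.0, 0.0])
finished = tf.cast(tf.round(stop_tokens), tf.bool)

print(tf.reduce_any(finished).numpy())  # True  -- stop as soon as any frame says "stop"
print(tf.reduce_all(finished).numpy())  # False -- only stop when every frame says "stop"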
Example 7: __call__
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def __call__(self, input_var, name=None, **kwargs):
    def _init():
        v_norm = tf.nn.l2_normalize(self.v, axis=[0, 1, 2])
        t = tf.nn.conv2d(input_var, v_norm, self.strides, self.padding, data_format='NHWC')
        mu, var = tf.nn.moments(t, axes=[0, 1, 2])
        std = tf.sqrt(var + self.epsilon)
        return [tf.assign(self.g, 1 / std), tf.assign(self.b, -1. * mu / std)]

    require_init = tf.reduce_any(tf.is_nan(self.g))
    init_ops = tf.cond(require_init, _init, lambda: [self.g, self.b])

    with tf.control_dependencies(init_ops):
        w = tf.reshape(self.g, [1, 1, 1, tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v, axis=[0, 1, 2])
        return tf.nn.bias_add(
            tf.nn.conv2d(input_var, w, data_format='NHWC',
                         strides=self.strides, padding=self.padding),
            self.b, data_format='NHWC', name=name)
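Here self.g is presumably created filled with NaNs, so tf.reduce_any(tf.is_nan(self.g)) acts as a run-once flag for data-dependent initialization. A stripped-down eager-mode sketch of the pattern (toy variable, my own simplification; the original builds it with tf.cond in graph mode):

import tensorflow as tf

g = tf.Variable([float('nan')])                  # sentinel: NaN means "not yet initialized"
require_init = tf.reduce_any(tf.math.is_nan(g))
if require_init:                                  # in eager mode this fires exactly once
    g.assign([1.0])                               # data-dependent initialization would go here
print(g.numpy())  # [1.]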
Example 8: reduce_any
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def reduce_any(input_tensor, axis=None, keepdims=None,
               name=None, reduction_indices=None):
    """
    Wrapper around tf.reduce_any to handle the keep_dims argument.
    """
    return reduce_function(tf.reduce_any, input_tensor, axis=axis,
                           keepdims=keepdims, name=name,
                           reduction_indices=reduction_indices)
Example 9: filter_short_segments
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def filter_short_segments(self, sample):
    """ Filter out segments that are too short. """
    return tf.reduce_any([
        tf.shape(sample[f'{instrument}_spectrogram'])[0] >= self._T
        for instrument in self._instruments])
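Note that reduce_any is applied here to a Python list of scalar booleans (one per instrument), which TensorFlow stacks into a 1-D tensor before reducing, so a sample passes as long as at least one instrument's spectrogram has at least _T frames. A small sketch (hypothetical shapes and instrument names, not the actual Spleeter dataset) of the same predicate:

import tensorflow as tf

T = 128
instruments = ['vocals', 'drums']

def long_enough(sample):
    # True if at least one spectrogram has >= T time frames.
    return tf.reduce_any([tf.shape(sample[f'{name}_spectrogram'])[0] >= T
                          for name in instruments])

sample = {'vocals_spectrogram': tf.zeros([200, 1024]),
          'drums_spectrogram': tf.zeros([50, 1024])}
print(long_enough(sample).numpy())  # True: the vocals segment is long enough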
Example 10: _BuildSequence
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def _BuildSequence(self,
                   batch_size,
                   max_steps,
                   features,
                   state,
                   use_average=False):
    """Adds a sequence of beam parsing steps."""
    def Advance(state, step, scores_array, alive, alive_steps, *features):
        scores = self._BuildNetwork(features,
                                    return_average=use_average)['logits']
        scores_array = scores_array.write(step, scores)
        features, state, alive = (
            gen_parser_ops.beam_parser(state, scores, self._feature_size))
        return [state, step + 1, scores_array, alive, alive_steps + tf.cast(
            alive, tf.int32)] + list(features)

    # args: (state, step, scores_array, alive, alive_steps, *features)
    def KeepGoing(*args):
        return tf.logical_and(args[1] < max_steps, tf.reduce_any(args[3]))

    step = tf.constant(0, tf.int32, [])
    scores_array = tensor_array_ops.TensorArray(dtype=tf.float32,
                                                size=0,
                                                dynamic_size=True)
    alive = tf.constant(True, tf.bool, [batch_size])
    alive_steps = tf.constant(0, tf.int32, [batch_size])
    t = tf.while_loop(
        KeepGoing,
        Advance,
        [state, step, scores_array, alive, alive_steps] + list(features),
        shape_invariants=[tf.TensorShape(None)] * (len(features) + 5),
        parallel_iterations=100)
    # Link to the final nodes/values of ops that have passed through While:
    return {'state': t[0],
            'concat_scores': t[2].concat(),
            'alive': t[3],
            'alive_steps': t[4]}
Example 11: prune_outside_window
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def prune_outside_window(boxlist, window, scope=None):
    """Prunes bounding boxes that fall outside a given window.

    This function prunes bounding boxes that even partially fall outside the given
    window. See also clip_to_window, which only prunes bounding boxes that fall
    completely outside the window and clips any bounding boxes that partially
    overflow.

    Args:
      boxlist: a BoxList holding M_in boxes.
      window: a float tensor of shape [4] representing the [ymin, xmin, ymax, xmax]
        of the window.
      scope: name scope.

    Returns:
      pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
      valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
        in the input tensor.
    """
    with tf.name_scope(scope, 'PruneOutsideWindow'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        coordinate_violations = tf.concat([
            tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
            tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
        ], 1)
        valid_indices = tf.reshape(
            tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
        return gather(boxlist, valid_indices), valid_indices
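reduce_any runs here over the [M, 4] matrix of per-edge violations: a box is discarded as soon as any one of its four coordinates lies outside the window. A numeric sketch (hand-picked boxes and a unit window, not a real BoxList) of that test:

import tensorflow as tf

# [ymin, xmin, ymax, xmax] boxes and a unit window.
boxes = tf.constant([[0.1, 0.1, 0.5, 0.5],     # fully inside
                     [-0.1, 0.2, 0.4, 0.6],    # ymin outside -> pruned
                     [0.2, 0.2, 0.9, 1.2]])    # xmax outside -> pruned
window = tf.constant([0.0, 0.0, 1.0, 1.0])

y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
    tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
    tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)], 1)

valid = tf.logical_not(tf.reduce_any(coordinate_violations, 1))
print(valid.numpy())  # [ True False False]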
Example 12: prune_completely_outside_window
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def prune_completely_outside_window(boxlist, window, scope=None):
    """Prunes bounding boxes that fall completely outside of the given window.

    The function clip_to_window prunes bounding boxes that fall completely outside
    the window, but also clips any bounding boxes that partially overflow. This
    function does not clip partially overflowing boxes.

    Args:
      boxlist: a BoxList holding M_in boxes.
      window: a float tensor of shape [4] representing the [ymin, xmin, ymax, xmax]
        of the window.
      scope: name scope.

    Returns:
      pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in.
      valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
        in the input tensor.
    """
    with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        coordinate_violations = tf.concat([
            tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
            tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
        ], 1)
        valid_indices = tf.reshape(
            tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
        return gather(boxlist, valid_indices), valid_indices
Example 13: _define_step
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def _define_step(self, done, score, summary):
    """Combine operations of a phase.

    Keeps track of the mean score and when to report it.

    Args:
      done: Tensor indicating whether current score can be used.
      score: Tensor holding the current, possibly intermediate, score.
      summary: Tensor holding summary string to write if not an empty string.

    Returns:
      Tuple of summary tensor, mean score, and new global step. The mean score
      is zero for non reporting steps.
    """
    if done.shape.ndims == 0:
        done = done[None]
    if score.shape.ndims == 0:
        score = score[None]
    score_mean = streaming_mean.StreamingMean((), tf.float32)
    with tf.control_dependencies([done, score, summary]):
        done_score = tf.gather(score, tf.where(done)[:, 0])
        submit_score = tf.cond(
            tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
    with tf.control_dependencies([submit_score]):
        mean_score = tf.cond(self._report, score_mean.clear, float)
        steps_made = tf.shape(score)[0]
        next_step = self._step.assign_add(steps_made)
    with tf.control_dependencies([mean_score, next_step]):
        return tf.identity(summary), mean_score, next_step, steps_made
Example 14: transform_targets_for_output
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def transform_targets_for_output(y_true, grid_y, grid_x, anchor_idxs, classes):
    # y_true: (N, boxes, (x1, y1, x2, y2, class, best_anchor))
    N = tf.shape(y_true)[0]
    # y_true_out: (N, grid, grid, anchors, [x, y, w, h, obj, class])
    y_true_out = tf.zeros((N, grid_y, grid_x, tf.shape(anchor_idxs)[0], 6))
    anchor_idxs = tf.cast(anchor_idxs, tf.int32)
    indexes = tf.TensorArray(tf.int32, 1, dynamic_size=True)
    updates = tf.TensorArray(tf.float32, 1, dynamic_size=True)
    idx = 0
    for i in tf.range(N):
        for j in tf.range(tf.shape(y_true)[1]):
            if tf.equal(y_true[i][j][2], 0):
                continue
            anchor_eq = tf.equal(anchor_idxs, tf.cast(y_true[i][j][5], tf.int32))
            if tf.reduce_any(anchor_eq):
                box = y_true[i][j][0:4]
                box_xy = (y_true[i][j][0:2] + y_true[i][j][2:4]) / 2.
                anchor_idx = tf.cast(tf.where(anchor_eq), tf.int32)
                grid_size = tf.cast(tf.stack([grid_x, grid_y], axis=-1), tf.float32)
                grid_xy = tf.cast(box_xy * grid_size, tf.int32)
                # grid[y][x][anchor] = (tx, ty, bw, bh, obj, class)
                indexes = indexes.write(idx, [i, grid_xy[1], grid_xy[0], anchor_idx[0][0]])
                updates = updates.write(idx, [box[0], box[1], box[2], box[3], 1, y_true[i][j][4]])
                idx += 1
    y_true_out = tf.tensor_scatter_nd_update(y_true_out, indexes.stack(), updates.stack())
    return y_true_out
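Inside the nested loops, tf.reduce_any(anchor_eq) just checks whether the ground-truth box's best anchor belongs to this output level's anchor set, and tf.where then recovers its position. A tiny sketch (made-up anchor indices) of that membership test:

import tensorflow as tf

anchor_idxs = tf.constant([3, 4, 5])                    # anchors owned by this output scale
best_anchor = tf.constant(4)                            # best anchor of one ground-truth box

anchor_eq = tf.equal(anchor_idxs, best_anchor)          # [False, True, False]
print(tf.reduce_any(anchor_eq).numpy())                 # True: this scale handles the box
print(tf.cast(tf.where(anchor_eq), tf.int32).numpy())   # [[1]]: index of the matching anchor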
Example 15: prune_completely_outside_window
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import reduce_any [as alias]
def prune_completely_outside_window(boxlist, window, scope=None):
    """Prunes bounding boxes that fall completely outside of the given window.

    The function clip_to_window prunes bounding boxes that fall completely outside
    the window, but also clips any bounding boxes that partially overflow. This
    function does not clip partially overflowing boxes.

    Args:
      boxlist: a BoxList holding M_in boxes.
      window: a float tensor of shape [4] representing the [ymin, xmin, ymax, xmax]
        of the window.
      scope: name scope.

    Returns:
      pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
        the window.
      valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
        in the input tensor.
    """
    with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        coordinate_violations = tf.concat([
            tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
            tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
        ], 1)
        valid_indices = tf.reshape(
            tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
        return gather(boxlist, valid_indices), valid_indices