This article collects typical code examples of the Python method tensorflow.compat.v1.greater. If you are wondering what v1.greater does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.compat.v1.
The following shows 15 code examples of the v1.greater method.
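Before the examples, here is a minimal, self-contained sketch of what tf.compat.v1.greater does: it performs an element-wise x > y comparison (with broadcasting) and returns a boolean tensor, which is often cast to a 0/1 float mask. The tensor values are illustrative only.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant([1.0, 4.0, 2.0])
y = tf.constant(2.0)                  # scalar broadcasts against x
mask = tf.greater(x, y)               # element-wise x > y -> tf.bool
as_float = tf.cast(mask, tf.float32)  # common follow-up: 0/1 mask

with tf.Session() as sess:
  print(sess.run(mask))      # [False  True False]
  print(sess.run(as_float))  # [0. 1. 0.]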
Example 1: unwrap
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis over which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  # `diff` is a finite-difference helper defined in the same module (not shown here).
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ddmod = tf.where(idx, tf.zeros_like(ddmod), dd)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)
  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped
Example 2: _decode_masks
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _decode_masks(self, parsed_tensors):
  """Decode a set of PNG masks to the tf.float32 tensors."""
  def _decode_png_mask(png_bytes):
    mask = tf.squeeze(
        tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
    mask = tf.cast(mask, dtype=tf.float32)
    mask.set_shape([None, None])
    return mask

  height = parsed_tensors['image/height']
  width = parsed_tensors['image/width']
  masks = parsed_tensors['image/object/mask']
  return tf.cond(
      tf.greater(tf.size(masks), 0),
      lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
      lambda: tf.zeros([0, height, width], dtype=tf.float32))
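A minimal standalone sketch of the guard used above: tf.greater(tf.size(x), 0) lets tf.cond skip tf.map_fn when an example contains no masks and return a correctly shaped empty tensor instead. The function name decoded_or_empty, the m * 2.0 stand-in for PNG decoding, and the toy shapes are hypothetical.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

masks = tf.placeholder(tf.float32, shape=[None, 2, 2])
result = tf.cond(
    tf.greater(tf.size(masks), 0),                              # any masks present?
    lambda: tf.map_fn(lambda m: m * 2.0, masks, dtype=tf.float32),
    lambda: tf.zeros([0, 2, 2], dtype=tf.float32))               # empty fallback

with tf.Session() as sess:
  print(sess.run(result, feed_dict={masks: np.ones((3, 2, 2))}).shape)   # (3, 2, 2)
  print(sess.run(result, feed_dict={masks: np.zeros((0, 2, 2))}).shape)  # (0, 2, 2)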
Example 3: maybe_add_noise
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def maybe_add_noise(image, noise_shape, scale0, scale1,
                    image_noise_probability, image_noise_ratio):
  """Add noise at two scales."""
  if image_noise_probability < 0.000001 or (
      image_noise_ratio < 0.000001):
    return image
  noise_list = []
  for scale in [scale0, scale1]:
    rand_image_noise_ratio = tf.random.uniform(
        shape=[], minval=0.0, maxval=image_noise_ratio)
    noise_list.append(
        _rand_noise(0.0, rand_image_noise_ratio, scale, noise_shape))
  skip_noise = tf.greater(tf.random.uniform([]), image_noise_probability)
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[0])
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[1])
  return image
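A minimal standalone sketch of the pattern above: draw a uniform sample, compare it against a probability with tf.greater, and feed the resulting boolean to tf.cond so an augmentation is applied only some of the time. The 0.3 probability and the "+ 1.0" augmentation are illustrative only.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

image = tf.zeros([2, 2])
apply_probability = 0.3
skip = tf.greater(tf.random.uniform([]), apply_probability)  # True with prob ~0.7
image = tf.cond(skip, lambda: image, lambda: image + 1.0)

with tf.Session() as sess:
  print(sess.run(image))  # all zeros ~70% of the time, all ones otherwise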
Example 4: intensity_shift
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def intensity_shift(
    image, label, per_class_intensity_scale, per_class_intensity_shift):
  """Perturb intensity in lesion and non-lesion regions."""
  if per_class_intensity_scale < 0.000001 and (
      per_class_intensity_shift < 0.000001):
    return image
  # Randomly change (mostly increase) intensity of non-lesion region.
  per_class_noise = _truncated_normal(
      per_class_intensity_shift, per_class_intensity_scale)
  image = image + per_class_noise * (
      image * tf.cast(tf.greater(label, 1.5), tf.float32))
  # Randomly change (mostly decrease) intensity of lesion region.
  per_class_noise = _truncated_normal(
      -per_class_intensity_shift, per_class_intensity_scale)
  image = image + per_class_noise * (
      image * tf.cast(tf.less(label, 1.5), tf.float32))
  return image
Example 5: image_corruption
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def image_corruption(
    image, label, reso, image_corrupt_ratio_mean, image_corrupt_ratio_stddev):
  """Randomly drop non-lesion pixels."""
  if image_corrupt_ratio_mean < 0.000001 and (
      image_corrupt_ratio_stddev < 0.000001):
    return image
  # Corrupt non-lesion region according to keep_mask.
  keep_mask = _gen_rand_mask(
      1 - image_corrupt_ratio_mean,
      image_corrupt_ratio_stddev,
      reso // 3, image.shape)
  keep_mask = tf.logical_or(tf.greater(label, 1.5), keep_mask)
  image *= tf.cast(keep_mask, tf.float32)
  return image
Example 6: compute_thresholded_labels
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def compute_thresholded_labels(labels, null_threshold=4):
  """Computes thresholded labels.

  Args:
    labels: <int32> [batch_size, num_annotators]
    null_threshold: If the number of null annotations is greater than or equal
      to this threshold, all annotations are set to null for this example.

  Returns:
    thresholded_labels: <int32> [batch_size, num_annotators]
  """
  null_labels = tf.equal(labels, 0)
  # <int32> [batch_size]
  null_count = tf.reduce_sum(tf.to_int32(null_labels), 1)
  threshold_mask = tf.less(null_count, null_threshold)
  # <bool> [batch_size, num_annotators]
  threshold_mask = tf.tile(
      tf.expand_dims(threshold_mask, -1), [1, tf.shape(labels)[1]])
  # <bool> [batch_size, num_annotators]
  thresholded_labels = tf.where(
      threshold_mask, x=labels, y=tf.zeros_like(labels))
  return thresholded_labels
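A hedged usage sketch, assuming the function above is in scope and TF1 behavior is enabled: a row with at least null_threshold zero annotations is zeroed out entirely, while rows below the threshold are left untouched. The label values are toy data.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

labels = tf.constant([[0, 0, 0, 0, 1],
                      [1, 2, 3, 0, 0]], dtype=tf.int32)
thresholded = compute_thresholded_labels(labels, null_threshold=4)

with tf.Session() as sess:
  print(sess.run(thresholded))
  # [[0 0 0 0 0]   <- 4 nulls >= threshold, so the whole row is nulled
  #  [1 2 3 0 0]]  <- 2 nulls < threshold, so the row is kept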
Example 7: f1_metric
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)
  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)
  return f1, f1_op
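A minimal standalone sketch of the zero-guard above: tf.greater flags the entries with a non-zero denominator, and tf.where substitutes 0 for the ill-defined entries instead of letting NaN propagate. The precision/recall values are toy data.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

precision = tf.constant([0.8, 0.0])
recall = tf.constant([0.5, 0.0])
denominator = precision + recall
f1 = tf.divide(2.0 * precision * recall, denominator)        # second entry is 0/0 -> NaN
f1 = tf.where(tf.greater(denominator, 0.0), x=f1, y=tf.zeros_like(f1))

with tf.Session() as sess:
  print(sess.run(f1))  # approximately [0.6154, 0.0]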
Example 8: _reshape_instance_masks
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _reshape_instance_masks(self, keys_to_tensors):
  """Reshape instance segmentation masks.

  The instance segmentation masks are reshaped to [num_instances, height,
  width].

  Args:
    keys_to_tensors: a dictionary from keys to tensors.

  Returns:
    A 3-D float tensor of shape [num_instances, height, width] with values
    in {0, 1}.
  """
  height = keys_to_tensors['image/height']
  width = keys_to_tensors['image/width']
  to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
  masks = keys_to_tensors['image/object/mask']
  if isinstance(masks, tf.SparseTensor):
    masks = tf.sparse_tensor_to_dense(masks)
  masks = tf.reshape(
      tf.cast(tf.greater(masks, 0.0), dtype=tf.float32), to_shape)
  return tf.cast(masks, tf.float32)
Example 9: _get_refined_encodings_for_postitive_class
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _get_refined_encodings_for_postitive_class(
    self, refined_box_encodings, flat_cls_targets_with_background,
    batch_size):
  # We only predict refined location encodings for the non-background
  # classes, but we now pad it to make it compatible with the class
  # predictions.
  refined_box_encodings_with_background = tf.pad(refined_box_encodings,
                                                 [[0, 0], [1, 0], [0, 0]])
  refined_box_encodings_masked_by_class_targets = (
      box_list_ops.boolean_mask(
          box_list.BoxList(
              tf.reshape(refined_box_encodings_with_background,
                         [-1, self._box_coder.code_size])),
          tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]),
          use_static_shapes=self._use_static_shapes,
          indicator_sum=batch_size * self.max_num_proposals
          if self._use_static_shapes else None).get())
  return tf.reshape(
      refined_box_encodings_masked_by_class_targets, [
          batch_size, self.max_num_proposals,
          self._box_coder.code_size
      ])
Example 10: _padded_batched_proposals_indicator
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _padded_batched_proposals_indicator(self,
                                        num_proposals,
                                        max_num_proposals):
  """Creates indicator matrix of non-pad elements of padded batch proposals.

  Args:
    num_proposals: Tensor of type tf.int32 with shape [batch_size].
    max_num_proposals: Maximum number of proposals per image (integer).

  Returns:
    A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
  """
  batch_size = tf.size(num_proposals)
  tiled_num_proposals = tf.tile(
      tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
  tiled_proposal_index = tf.tile(
      tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
  return tf.greater(tiled_num_proposals, tiled_proposal_index)
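A minimal standalone sketch of the same computation with toy numbers: comparing each proposal slot index against the per-image proposal count via tf.greater yields a boolean "real proposal vs. padding" matrix.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

num_proposals = tf.constant([2, 0, 3])      # real proposals per image
max_num_proposals = 4
tiled_counts = tf.tile(tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_index = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0),
                      [tf.size(num_proposals), 1])
indicator = tf.greater(tiled_counts, tiled_index)

with tf.Session() as sess:
  print(sess.run(indicator))
  # [[ True  True False False]
  #  [False False False False]
  #  [ True  True  True False]]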
Example 11: test_visualize_boxes_in_image
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def test_visualize_boxes_in_image(self):
  def graph_fn():
    image = tf.zeros((6, 4, 3))
    corners = tf.constant([[0, 0, 5, 3],
                           [0, 0, 3, 2]], tf.float32)
    boxes = box_list.BoxList(corners)
    image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes)
    image_and_boxes_bw = tf.cast(
        tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0), dtype=tf.float32)
    return image_and_boxes_bw

  exp_result = [[1, 1, 1, 0],
                [1, 1, 1, 0],
                [1, 1, 1, 0],
                [1, 0, 1, 0],
                [1, 1, 1, 0],
                [0, 0, 0, 0]]
  output = self.execute_cpu(graph_fn, [])
  self.assertAllEqual(output.astype(int), exp_result)
Example 12: test_nested_loop
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def test_nested_loop():
  graph = tf.Graph()
  with graph.as_default():
    def body(x):
      def nest_body(c):
        return tf.multiply(c, 2)

      def cd(c):
        return tf.less(c, 10)

      c = tf.constant(2)
      res = tf.while_loop(cd, nest_body, loop_vars=[c])
      return tf.nn.relu(x + res)

    def condition(x):
      return tf.greater(x, 100)

    x = tf.constant(3)
    r = tf.while_loop(condition, body, loop_vars=[x])
    with tf.Session() as sess:
      tf_out = sess.run(r)

  check_equal(graph, tf_out)
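Note that in the test above the outer condition tf.greater(x, 100) is already False for the initial value 3, so the outer body never runs and r evaluates to 3. For contrast, here is a minimal standalone sketch (not part of the test) of a while_loop whose tf.greater condition actually iterates; the decrement body and the start value 5 are illustrative.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant(5)
result = tf.while_loop(
    cond=lambda i: tf.greater(i, 0),  # keep looping while i > 0
    body=lambda i: i - 1,
    loop_vars=[x])

with tf.Session() as sess:
  print(sess.run(result))  # 0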
Example 13: _localization_loss
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _localization_loss(self, pred_loc, gt_loc, gt_label, num_matched_boxes):
  """Computes the localization loss.

  Computes the localization loss using smooth l1 loss.

  Args:
    pred_loc: a flattened tensor that includes all predicted locations. The
      shape is [batch_size, num_anchors, 4].
    gt_loc: a tensor representing box regression targets in
      [batch_size, num_anchors, 4].
    gt_label: a tensor that represents the classification groundtruth targets.
      The shape is [batch_size, num_anchors, 1].
    num_matched_boxes: the number of anchors that are matched to groundtruth
      targets, used as the loss normalizer. The shape is [batch_size].

  Returns:
    box_loss: a float32 representing total box regression loss.
  """
  mask = tf.greater(tf.squeeze(gt_label), 0)
  float_mask = tf.cast(mask, tf.float32)
  smooth_l1 = tf.reduce_sum(tf.losses.huber_loss(
      gt_loc, pred_loc,
      reduction=tf.losses.Reduction.NONE
  ), axis=2)
  smooth_l1 = tf.multiply(smooth_l1, float_mask)
  box_loss = tf.reduce_sum(smooth_l1, axis=1)
  return tf.reduce_mean(box_loss / num_matched_boxes)
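A minimal standalone sketch (hypothetical toy tensors) of the masking step above: tf.greater turns per-anchor class ids into a positive-anchor mask that zeroes out the loss contribution of background anchors (label 0).

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

gt_label = tf.constant([[1, 0, 3, 0]])                # [batch=1, num_anchors=4]
per_anchor_loss = tf.constant([[0.5, 2.0, 1.0, 4.0]])
float_mask = tf.cast(tf.greater(gt_label, 0), tf.float32)
masked_loss = per_anchor_loss * float_mask            # background losses -> 0

with tf.Session() as sess:
  print(sess.run(masked_loss))  # [[0.5 0.  1.  0. ]]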
Example 14: _classification_loss
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def _classification_loss(self, pred_label, gt_label, num_matched_boxes):
  """Computes the classification loss.

  Computes the classification loss with hard negative mining.

  Args:
    pred_label: a flattened tensor that includes all predicted classes. The
      shape is [batch_size, num_anchors, num_classes].
    gt_label: a tensor that represents the classification groundtruth targets.
      The shape is [batch_size, num_anchors, 1].
    num_matched_boxes: the number of anchors that are matched to groundtruth
      targets. This is used as the loss normalizer.

  Returns:
    class_loss: a float32 representing total classification loss.
  """
  cross_entropy = tf.losses.sparse_softmax_cross_entropy(
      gt_label, pred_label, reduction=tf.losses.Reduction.NONE)
  mask = tf.greater(tf.squeeze(gt_label), 0)
  float_mask = tf.cast(mask, tf.float32)
  # Hard example mining: keep only the highest-loss negatives per image.
  neg_masked_cross_entropy = cross_entropy * (1 - float_mask)
  relative_position = tf.argsort(
      tf.argsort(
          neg_masked_cross_entropy, direction='DESCENDING'))
  num_neg_boxes = tf.minimum(
      tf.to_int32(num_matched_boxes) * ssd_constants.NEGS_PER_POSITIVE,
      ssd_constants.NUM_SSD_BOXES)
  top_k_neg_mask = tf.cast(tf.less(
      relative_position,
      tf.tile(num_neg_boxes[:, tf.newaxis], (1, ssd_constants.NUM_SSD_BOXES))
  ), tf.float32)
  class_loss = tf.reduce_sum(
      tf.multiply(cross_entropy, float_mask + top_k_neg_mask), axis=1)
  return tf.reduce_mean(class_loss / num_matched_boxes)
Example 15: body
# Required imports: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import greater [as alias]
def body(self, features):
  self.has_actions = "input_action" in features
  self.has_rewards = "target_reward" in features
  self.has_policies = "target_policy" in features
  self.has_values = "target_value" in features
  hparams = self.hparams

  def merge(inputs, targets):
    """Split inputs and targets into lists."""
    inputs = tf.unstack(inputs, axis=1)
    targets = tf.unstack(targets, axis=1)
    assert len(inputs) == hparams.video_num_input_frames
    assert len(targets) == hparams.video_num_target_frames
    return inputs + targets

  frames = merge(features["inputs"], features["targets"])
  frames_raw = merge(features["inputs_raw"], features["targets_raw"])
  actions, rewards = None, None
  if self.has_actions:
    actions = merge(features["input_action"], features["target_action"])
  if self.has_rewards:
    rewards = merge(features["input_reward"], features["target_reward"])

  # Reset the internal states if "reset_internal_states" was passed as a
  # feature and its value is greater than 0.
  if self.is_recurrent_model and self.internal_states is not None:
    def reset_func():
      reset_ops = flat_lists(self.reset_internal_states_ops())
      with tf.control_dependencies(reset_ops):
        return tf.no_op()

    if self.is_predicting and "reset_internal_states" in features:
      reset = features["reset_internal_states"]
      reset = tf.greater(tf.reduce_sum(reset), 0.5)
      reset_ops = tf.cond(reset, reset_func, tf.no_op)
    else:
      reset_ops = tf.no_op()
    with tf.control_dependencies([reset_ops]):
      frames[0] = tf.identity(frames[0])

  with tf.control_dependencies([frames[0]]):
    return self.__process(frames, actions, rewards, frames_raw)
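A minimal standalone sketch (hypothetical names and values) of the reset gating above: a numeric flag tensor is collapsed with tf.reduce_sum and compared against 0.5 via tf.greater, producing the scalar boolean that tf.cond requires.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

reset_flags = tf.placeholder(tf.float32, shape=[None])   # e.g. one flag per batch entry
do_reset = tf.greater(tf.reduce_sum(reset_flags), 0.5)
result = tf.cond(do_reset,
                 lambda: tf.constant("reset"),
                 lambda: tf.constant("keep"))

with tf.Session() as sess:
  print(sess.run(result, feed_dict={reset_flags: [0.0, 1.0]}))  # b'reset'
  print(sess.run(result, feed_dict={reset_flags: [0.0, 0.0]}))  # b'keep'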