This article collects typical usage examples of the tensorflow.compat.v1.Assert method in Python. If you are wondering what exactly v1.Assert does and how to use it, the curated code samples below may help. You can also explore further usage examples of the module this method belongs to, tensorflow.compat.v1.
A total of 15 code examples of the v1.Assert method are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: pad_to_fixed_size
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def pad_to_fixed_size(data, pad_value, output_shape):
  """Pad data to a fixed length at the first dimension.

  Args:
    data: Tensor to be padded to output_shape.
    pad_value: A constant value assigned to the paddings.
    output_shape: The output shape of a 2D tensor.

  Returns:
    The padded tensor with output_shape [max_num_instances, dimension].
  """
  max_num_instances = output_shape[0]
  dimension = output_shape[1]
  data = tf.reshape(data, [-1, dimension])
  num_instances = tf.shape(data)[0]
  assert_length = tf.Assert(
      tf.less_equal(num_instances, max_num_instances), [num_instances])
  with tf.control_dependencies([assert_length]):
    pad_length = max_num_instances - num_instances
  paddings = pad_value * tf.ones([pad_length, dimension])
  padded_data = tf.concat([data, paddings], axis=0)
  padded_data = tf.reshape(padded_data, output_shape)
  return padded_data
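A minimal usage sketch (not from the original snippet), assuming the function above is in scope and `tf` is tensorflow.compat.v1 running in graph mode:

# Hypothetical usage: pad two instances of dimension 2 up to five rows.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

data = tf.constant([[1.0, 2.0], [3.0, 4.0]])
padded = pad_to_fixed_size(data, pad_value=-1.0, output_shape=[5, 2])
with tf.Session() as sess:
  print(sess.run(padded))
  # The last three rows are filled with -1. Feeding more than five instances
  # would trip the tf.Assert and raise InvalidArgumentError at run time.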
Example 2: assert_box_normalized
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def assert_box_normalized(boxes, maximum_normalized_coordinate=1.1):
  """Asserts the input box tensor is normalized.

  Args:
    boxes: a tensor of shape [N, 4] where N is the number of boxes.
    maximum_normalized_coordinate: Maximum coordinate value to be considered
      as normalized, default to 1.1.

  Returns:
    a tf.Assert op which fails when the input box tensor is not normalized.

  Raises:
    ValueError: When the input box tensor is not normalized.
  """
  box_minimum = tf.reduce_min(boxes)
  box_maximum = tf.reduce_max(boxes)
  return tf.Assert(
      tf.logical_and(
          tf.less_equal(box_maximum, maximum_normalized_coordinate),
          tf.greater_equal(box_minimum, 0)),
      [boxes])
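A minimal usage sketch (my addition, not from the original): the returned op only takes effect if something depends on it, so it is normally wired in as a control dependency.

# Hypothetical usage: make downstream ops depend on the normalization check.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

boxes = tf.constant([[0.1, 0.2, 0.8, 0.9]])
with tf.control_dependencies([assert_box_normalized(boxes)]):
  checked_boxes = tf.identity(boxes)
with tf.Session() as sess:
  sess.run(checked_boxes)  # raises InvalidArgumentError if a coordinate is
                           # negative or above maximum_normalized_coordinate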
Example 3: create_test_network_7
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def create_test_network_7():
  """Aligned network for test, with a control dependency.

  The graph is similar to create_test_network_1(), except that it includes an
  assert operation on the left branch.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = tf.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = tf.placeholder(tf.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    l1_shape = tf.shape(l1)
    assert_op = tf.Assert(tf.equal(l1_shape[1], 2), [l1_shape], summarize=4)
    # Right branch.
    l2_pad = tf.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    with tf.control_dependencies([assert_op]):
      tf.nn.relu(l1 + l3, name='output')
  return g
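A minimal sketch of how the resulting graph might be exercised (my addition, assuming `slim` is tf_slim and tensorflow.compat.v1 runs in graph mode):

# Hypothetical driver: the ReLU output is only produced after the tf.Assert
# on the left branch's shape has been evaluated.
import numpy as np
import tensorflow.compat.v1 as tf

g = create_test_network_7()
with tf.Session(graph=g) as sess:
  out = sess.run('output:0',
                 feed_dict={'input_image:0': np.zeros((1, 8, 8, 1), np.float32)})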
Example 4: test_assert_true
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def test_assert_true():
    g = tf.Graph()
    shape = (1, 2)
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=shape, name="input")
        assert_op = tf.Assert(tf.reduce_all(tf.less_equal(x, x)), ["it failed"])

        with tf.Session() as sess:
            x_value = np.random.rand(*shape)
            assert sess.run(assert_op, feed_dict={x: x_value}) is None

        # In TVM, tf.assert is converted to a no-op which is actually a 0,
        # though it should probably be none or an empty tuple.
        #
        # ToDo: It appears that the frontend converter gets confused here and
        # entirely eliminates all operands from main(). Likely because x <= x
        # is always true, so the placeholder can be eliminated. But TF doesn't
        # do that, it's happening in Relay, and that optimization shouldn't
        # affect the arity of the main function. We should have to pass in
        # x_value here.
        np.testing.assert_allclose(0, run_relay(g, {'input': shape}).asnumpy())
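The tests in this group call a helper run_relay that the snippets do not show. As a rough, hedged sketch only (the actual helper in the TVM test suite may differ), it presumably imports the TF graph into Relay and executes it, roughly like this:

# Hypothetical sketch of the unshown run_relay helper (assumption, not the
# original implementation).
from tvm import relay

def run_relay(graph, shape_dict=None, *vargs):
    # Convert the TF GraphDef into a Relay module, then run it eagerly.
    mod, params = relay.frontend.from_tensorflow(
        graph.as_graph_def(add_shapes=True), shape=shape_dict)
    ex = relay.create_executor("debug", mod=mod)
    return ex.evaluate()(*vargs)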
Example 5: test_assert_true_var_capture
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def test_assert_true_var_capture():
    g = tf.Graph()
    with g.as_default():
        x = tf.placeholder(tf.float32, shape=())
        # It turns out that tf.assert() creates a large and complex subgraph if
        # you capture a variable as part of the error message. So we need to
        # test that, too.
        assert_op = tf.Assert(tf.less_equal(x, x), ["it failed", x])

        with tf.Session() as sess:
            x_value = np.random.rand()
            assert sess.run(assert_op, feed_dict={x: x_value}) is None

        # TODO: The frontend converter notes the output of
        # the graph as a boolean, which is not correct - as you can see above,
        # TF believes that the value of this graph is None.
        np.testing.assert_allclose(True,
                                   run_relay(g, None, x_value).asnumpy())
Example 6: test_assert_false
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def test_assert_false():
    g = tf.Graph()
    with g.as_default():
        assert_op = tf.Assert(tf.constant(False), ["it failed"])

        with tf.Session() as sess:
            try:
                print(sess.run(assert_op))
                assert False  # TF should have thrown an exception
            except tf.errors.InvalidArgumentError as e:
                assert "it failed" in e.message

        # In TVM, tf.assert is converted to a no-op which is actually a 0,
        # though it should probably be none or an empty tuple. For the same
        # reason, there should not be an error here, even though the assertion
        # argument is false.
        np.testing.assert_allclose(0, run_relay(g).asnumpy())
Example 7: _get_cubic_root
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def _get_cubic_root(self):
  """Get the cubic root."""
  # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
  # where x = sqrt(mu).
  # We substitute x, which is sqrt(mu), with x = y + 1.
  # It gives y^3 + py = q
  # where p = (D^2 h_min^2)/(2*C) and q = -p.
  # We use the Vieta's substitution to compute the root.
  # There is only one real solution y (which is in [0, 1] ).
  # http://mathworld.wolfram.com/VietasSubstitution.html
  assert_array = [
      tf.Assert(
          tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_nan(self._grad_var)),
          [self._grad_var,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
          [self._dist_to_opt_avg,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._h_min)),
          [self._h_min,]),
      tf.Assert(
          tf.logical_not(tf.is_inf(self._grad_var)),
          [self._grad_var,])
  ]
  with tf.control_dependencies(assert_array):
    p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
    y = w - p / 3.0 / w
    x = y + 1
  return x
Example 8: _crop
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: `Tensor` image of shape [height, width, channels].
    offset_height: `Tensor` indicating the height offset.
    offset_width: `Tensor` indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ["Crop size greater than the image size."])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
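A minimal usage sketch (my addition, assuming graph-mode tensorflow.compat.v1):

# Hypothetical usage: a 100x100 crop from an image fed at run time.
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

image = tf.placeholder(tf.float32, shape=[None, None, 3])
cropped = _crop(image, offset_height=10, offset_width=10,
                crop_height=100, crop_width=100)
with tf.Session() as sess:
  out = sess.run(cropped, feed_dict={image: np.zeros((224, 224, 3), np.float32)})
  # out.shape == (100, 100, 3); an image smaller than the crop size would make
  # the size assertion fail with InvalidArgumentError.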
Example 9: pad_to_length
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def pad_to_length(tensor, target_length):
  """Pads a 1-D Tensor with zeros to the target length."""
  pad_amt = target_length - tf.size(tensor)
  # Assert that pad_amt is non-negative.
  assert_op = tf.Assert(pad_amt >= 0,
                        ["\nERROR: len(tensor) > target_length.", pad_amt])
  with tf.control_dependencies([assert_op]):
    padded = tf.pad(tensor, [[0, pad_amt]])
    padded.set_shape([target_length])
    return padded
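A minimal usage sketch (my addition, assuming graph-mode tensorflow.compat.v1):

# Hypothetical usage: pad a length-3 vector up to length 5.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

tokens = tf.constant([3, 1, 4])
padded = pad_to_length(tokens, target_length=5)
with tf.Session() as sess:
  print(sess.run(padded))  # [3 1 4 0 0]; a tensor longer than 5 trips the assert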
Example 10: _crop
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)
Example 11: assert_or_prune_invalid_boxes
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def assert_or_prune_invalid_boxes(boxes):
  """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).

  When the hardware supports assertions, the function raises an error when
  boxes have an invalid size. If assertions are not supported (e.g. on TPU),
  boxes with invalid sizes are filtered out.

  Args:
    boxes: float tensor of shape [num_boxes, 4]

  Returns:
    boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes
      filtered out.

  Raises:
    tf.errors.InvalidArgumentError: When we detect boxes with invalid size.
      This is not supported on TPUs.
  """
  ymin, xmin, ymax, xmax = tf.split(
      boxes, num_or_size_splits=4, axis=1)

  height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])
  width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])

  with tf.control_dependencies([height_check, width_check]):
    boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1)
    boxlist = box_list.BoxList(boxes_tensor)
    # TODO(b/149221748) Remove pruning when XLA supports assertions.
    boxlist = box_list_ops.prune_small_boxes(boxlist, 0)

  return boxlist.get()
Example 12: check_min_image_dim
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def check_min_image_dim(min_dim, image_tensor):
  """Checks that the image width/height are greater than some number.

  This function is used to check that the width and height of an image are
  above a certain value. If the image shape is static, this function will
  perform the check at graph construction time. Otherwise, if the image shape
  varies, an Assertion control dependency will be added to the graph.

  Args:
    min_dim: The minimum number of pixels along the width and height of the
      image.
    image_tensor: The image tensor to check size for.

  Returns:
    If `image_tensor` has dynamic size, return `image_tensor` with an Assert
    control dependency. Otherwise returns image_tensor.

  Raises:
    ValueError: if `image_tensor`'s width or height is smaller than `min_dim`.
  """
  image_shape = image_tensor.get_shape()
  image_height = static_shape.get_height(image_shape)
  image_width = static_shape.get_width(image_shape)
  if image_height is None or image_width is None:
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
                       tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
        ['image size must be >= {} in both height and width.'.format(min_dim)])
    with tf.control_dependencies([shape_assert]):
      return tf.identity(image_tensor)

  if image_height < min_dim or image_width < min_dim:
    raise ValueError(
        'image size must be >= %d in both height and width; image dim = %d,%d' %
        (min_dim, image_height, image_width))
  return image_tensor
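A minimal usage sketch (my addition, assuming the object_detection static_shape utility used above is importable and tensorflow.compat.v1 runs in graph mode):

# Hypothetical usage: height/width are dynamic, so an Assert dependency is added.
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

images = tf.placeholder(tf.float32, shape=[1, None, None, 3])
checked = check_min_image_dim(33, images)
with tf.Session() as sess:
  sess.run(checked, feed_dict={images: np.zeros((1, 64, 64, 3), np.float32)})
  # Feeding e.g. a 16x16 image instead would raise InvalidArgumentError.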
Example 13: _get_shape
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def _get_shape(tensor, num_dims):
  tf.Assert(tensor.get_shape().ndims == num_dims, [tensor])
  return shape_utils.combined_static_and_dynamic_shape(tensor)
Example 14: to_normalized_coordinates
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def to_normalized_coordinates(keypoints, height, width,
                              check_range=True, scope=None):
  """Converts absolute keypoint coordinates to normalized coordinates in [0, 1].

  Usually one uses the dynamic shape of the image or conv-layer tensor:
    keypoints = keypoint_ops.to_normalized_coordinates(keypoints,
                                                       tf.shape(images)[1],
                                                       tf.shape(images)[2]),

  This function raises an assertion failed error at graph execution time when
  the maximum coordinate is smaller than 1.01 (which means that coordinates are
  already normalized). The value 1.01 is to deal with small rounding errors.

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2].
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with normalized
    coordinates in [0, 1].
  """
  with tf.name_scope(scope, 'ToNormalizedCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater(max_val, 1.01),
                             ['max value is lower than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, 1.0 / height, 1.0 / width)
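A minimal usage sketch (my addition, assuming the surrounding module's `scale` helper is available, as in the TF Object Detection API's keypoint_ops, and graph-mode tensorflow.compat.v1):

# Hypothetical usage: normalize absolute [y, x] keypoints by image size.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

keypoints = tf.constant([[[10.0, 20.0], [30.0, 40.0]]])  # absolute pixels
normalized = to_normalized_coordinates(keypoints, height=100, width=200)
with tf.Session() as sess:
  print(sess.run(normalized))  # [[[0.1, 0.1], [0.3, 0.2]]]
  # Passing keypoints that are already in [0, 1] would trip the range assert.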
Example 15: to_absolute_coordinates
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Assert [as alias]
def to_absolute_coordinates(keypoints, height, width,
                            check_range=True, scope=None):
  """Converts normalized keypoint coordinates to absolute pixel coordinates.

  This function raises an assertion failed error when the maximum keypoint
  coordinate value is larger than 1.01 (in which case coordinates are already
  absolute).

  Args:
    keypoints: A tensor of shape [num_instances, num_keypoints, 2]
    height: Maximum value for y coordinate of absolute keypoint coordinates.
    width: Maximum value for x coordinate of absolute keypoint coordinates.
    check_range: If True, checks if the coordinates are normalized or not.
    scope: name scope.

  Returns:
    tensor of shape [num_instances, num_keypoints, 2] with absolute coordinates
    in terms of the image size.
  """
  with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
    height = tf.cast(height, tf.float32)
    width = tf.cast(width, tf.float32)

    # Ensure range of input keypoints is correct.
    if check_range:
      max_val = tf.reduce_max(keypoints)
      max_assert = tf.Assert(tf.greater_equal(1.01, max_val),
                             ['maximum keypoint coordinate value is larger '
                              'than 1.01: ', max_val])
      with tf.control_dependencies([max_assert]):
        width = tf.identity(width)

    return scale(keypoints, height, width)