This article collects representative usage examples of the Python function tensorflow.python.ops.image_ops.convert_image_dtype. If you are wondering what convert_image_dtype does or how to use it, the hand-picked code examples below should help.
The following presents 8 code examples of convert_image_dtype, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
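Before turning to the examples, here is a minimal standalone sketch of what convert_image_dtype does: converting an integer image to a float dtype rescales pixel values into [0, 1], and converting back rescales to the full integer range. The sketch assumes the public tf.image.convert_image_dtype alias and a TensorFlow 1.x graph-mode session; the examples below call the internal image_ops module directly, which behaves the same way.

import tensorflow as tf

# uint8 pixels in [0, 255]
img_u8 = tf.constant([[0, 128, 255]], dtype=tf.uint8)

# Converting to float32 rescales values into [0, 1].
img_f32 = tf.image.convert_image_dtype(img_u8, tf.float32)

# Converting back to uint8 rescales again; saturate=True clips before casting.
img_back = tf.image.convert_image_dtype(img_f32, tf.uint8, saturate=True)

with tf.Session() as sess:
  print(sess.run(img_f32))   # ~[[0.    0.502 1.   ]]
  print(sess.run(img_back))  # [[  0 128 255]]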
Example 1: _adjust_saturation
def _adjust_saturation(self, image, saturation_factor):
  image = ops.convert_to_tensor(image, name="image")
  orig_dtype = image.dtype
  flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
  with self.test_scope():
    saturation_adjusted_image = gen_image_ops.adjust_saturation(
        flt_image, saturation_factor)
  return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
Example 2: testNoConvert
def testNoConvert(self):
  # Make sure converting to the same data type creates only an identity op
  with self.test_session():
    image = constant_op.constant([1], dtype=dtypes.uint8)
    image_ops.convert_image_dtype(image, dtypes.uint8)
    y = image_ops.convert_image_dtype(image, dtypes.uint8)
    self.assertEquals(y.op.type, 'Identity')
    self.assertEquals(y.op.inputs[0], image)
Example 3: _testContrast
def _testContrast(self, x_np, y_np, contrast_factor):
  with self.test_session():
    x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
    flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
    with self.test_scope():
      y = image_ops.adjust_contrast(flt_x, contrast_factor)
    y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
    y_tf = y.eval({x: x_np})
    self.assertAllClose(y_tf, y_np, 1e-6)
Example 4: testBatchAdjustHue
def testBatchAdjustHue(self):
  x_shape = [2, 1, 2, 3]
  x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
  x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)

  delta = 0.25
  y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
  y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)

  with self.test_session():
    x = array_ops.placeholder(x_np.dtype, shape=x_shape)
    flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
    with self.test_scope():
      y = gen_image_ops.adjust_hue(flt_x, delta)
    y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
    y_tf = y.eval({x: x_np})
    self.assertAllEqual(y_tf, y_np)
Example 5: _convert
def _convert(self, original, original_dtype, output_dtype, expected):
  x_np = np.array(original, dtype=original_dtype.as_numpy_dtype())
  y_np = np.array(expected, dtype=output_dtype.as_numpy_dtype())
  with self.test_session():
    image = constant_op.constant(x_np)
    y = image_ops.convert_image_dtype(image, output_dtype)
    self.assertTrue(y.dtype == output_dtype)
    self.assertAllClose(y.eval(), y_np, atol=1e-5)
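Example 5 is only a helper; the test class that defines it passes in concrete value/dtype pairs. The calls below are a hypothetical illustration of how it would be exercised (the actual parameterized values are not part of the excerpt):

# uint8 endpoints should map onto the float32 range [0.0, 1.0] ...
self._convert([0, 255], dtypes.uint8, dtypes.float32, [0.0, 1.0])
# ... and converting float32 back to uint8 should restore the integer range.
self._convert([0.0, 1.0], dtypes.float32, dtypes.uint8, [0, 255])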
Example 6: adjust_hsv_in_yiq
def adjust_hsv_in_yiq(image,
                      delta_hue=0,
                      scale_saturation=1,
                      scale_value=1,
                      name=None):
  """Adjust hue, saturation, value of an RGB image in YIQ color space.

  This is a convenience method that converts an RGB image to float
  representation, converts it to YIQ, rotates the color around the Y channel
  by delta_hue in radians, scales the chrominance channels (I, Q) by
  scale_saturation, scales all channels (Y, I, Q) by scale_value,
  converts back to RGB, and then back to the original data type.

  `image` is an RGB image. The image hue is adjusted by converting the
  image to YIQ, rotating around the luminance channel (Y) by
  `delta_hue` in radians, multiplying the chrominance channels (I, Q) by
  `scale_saturation`, and multiplying all channels (Y, I, Q) by
  `scale_value`. The image is then converted back to RGB.

  Args:
    image: RGB image or images. Size of the last dimension must be 3.
    delta_hue: float, the hue rotation amount, in radians.
    scale_saturation: float, factor to multiply the saturation by.
    scale_value: float, factor to multiply the value by.
    name: A name for this operation (optional).

  Returns:
    Adjusted image(s), same shape and DType as `image`.
  """
  with ops.name_scope(name, 'adjust_hsv_in_yiq', [image]) as name:
    image = ops.convert_to_tensor(image, name='image')
    # Remember the original dtype so we can convert back if needed
    orig_dtype = image.dtype
    flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
    rgb_altered = _distort_image_ops.adjust_hsv_in_yiq(
        flt_image, delta_hue, scale_saturation, scale_value)
    return image_ops.convert_image_dtype(rgb_altered, orig_dtype)
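Because adjust_hsv_in_yiq round-trips through convert_image_dtype, a uint8 input comes back as uint8. A short usage sketch, assuming the function is reachable as tf.contrib.image.adjust_hsv_in_yiq and that a TF 1.x session is available (both assumptions, not shown in the excerpt):

import numpy as np
import tensorflow as tf

image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)

# Rotate hue by 0.5 radians and mildly desaturate; the output dtype matches
# the uint8 input because of the convert_image_dtype round trip.
adjusted = tf.contrib.image.adjust_hsv_in_yiq(
    image, delta_hue=0.5, scale_saturation=0.8, scale_value=1.0)

with tf.Session() as sess:
  out = sess.run(adjusted)
  print(out.dtype, out.shape)  # uint8 (32, 32, 3)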
Example 7: preprocess_image
def preprocess_image(
    image, height=INCEPTION_V3_DEFAULT_IMG_SIZE,
    width=INCEPTION_V3_DEFAULT_IMG_SIZE, central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.

  If height and width are specified, it outputs an image of that size by
  applying resize_bilinear.

  If central_fraction is specified, it crops the central fraction of the
  input image.

  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should
      be [0, 1], otherwise it is converted to tf.float32 assuming that the
      range is [0, MAX], where MAX is the largest positive representable
      number for the int(8/16/32) data type (see `tf.image.convert_image_dtype`
      for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor of prepared image.
  """
  with ops.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != dtypes.float32:
      image = image_ops.convert_image_dtype(image, dtype=dtypes.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = image_ops.central_crop(image, central_fraction=central_fraction)
    # Resize the image to the specified height and width.
    image = array_ops.expand_dims(image, 0)
    image = image_ops.resize_bilinear(image, [height, width],
                                      align_corners=False)
    image = array_ops.squeeze(image, [0])
    image = (image - 0.5) * 2.0
    return image
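A sketch of calling the function above on a decoded JPEG, assuming INCEPTION_V3_DEFAULT_IMG_SIZE is 299 (the constant is not shown in the excerpt) and TF 1.x graph mode; the file name is purely illustrative:

import tensorflow as tf

raw = tf.read_file('photo.jpg')               # illustrative path
img = tf.image.decode_jpeg(raw, channels=3)   # uint8, shape [H, W, 3]

# convert_image_dtype brings the uint8 image into [0, 1] before cropping,
# resizing, and the final rescale to [-1, 1].
processed = preprocess_image(img)

with tf.Session() as sess:
  print(sess.run(processed).shape)  # (299, 299, 3)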
Example 8: testNoConvert
def testNoConvert(self):
  # Make sure converting to the same data type creates no ops
  with self.test_session():
    image = constant_op.constant([1], dtype=dtypes.uint8)
    y = image_ops.convert_image_dtype(image, dtypes.uint8)
    self.assertEquals(image, y)