This article collects typical usage examples of the tensorflow.uint16 method in Python. If you are wondering what exactly tensorflow.uint16 does and how to use it, the hand-picked code examples below may help. You can also explore further usage examples from the tensorflow module it belongs to.
The following shows 15 code examples of tensorflow.uint16, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
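Before the examples, here is a minimal sketch (written against the TensorFlow 1.x API that the examples below also use) of what tf.uint16 is: a 16-bit unsigned integer dtype for values in [0, 65535], most often seen when decoding 16-bit PNGs such as depth or disparity maps.

import numpy as np
import tensorflow as tf

# tf.uint16 holds values in [0, 65535], e.g. a raw 16-bit depth map.
depth_raw = tf.constant([[0, 1024], [32768, 65535]], dtype=tf.uint16)
# Integer ops on uint16 are limited, so it is usually cast to float32 first.
depth_float = tf.cast(depth_raw, tf.float32) / np.iinfo(np.uint16).max

with tf.Session() as sess:
    print(sess.run(depth_float))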
Example 1: test16bit
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def test16bit(self):
  img_bytes = [[0, 255], [1024, 1024 + 255]]
  # Encoded PNG bytes resulting from encoding the above img_bytes
  # using go's image/png encoder.
  encoded_bytes = [137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68,
                   82, 0, 0, 0, 2, 0, 0, 0, 2, 16, 0, 0, 0, 0, 7, 77, 142,
                   187, 0, 0, 0, 21, 73, 68, 65, 84, 120, 156, 98, 98, 96, 96,
                   248, 207, 194, 2, 36, 1, 1, 0, 0, 255, 255, 6, 60, 1, 10,
                   68, 160, 26, 131, 0, 0, 0, 0, 73, 69, 78, 68, 174, 66, 96,
                   130]
  byte_string = bytes(bytearray(encoded_bytes))
  img_in = tf.constant(byte_string, dtype=tf.string)
  decode = tf.squeeze(tf.image.decode_png(img_in, dtype=tf.uint16))
  with self.test_session():
    decoded = decode.eval()
    self.assertAllEqual(decoded, img_bytes)
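The test above checks decoding against bytes produced by Go's PNG encoder. A simpler way to exercise the same 16-bit path is a round trip through tf.image.encode_png and tf.image.decode_png (a minimal sketch with made-up pixel values, not part of the original test suite):

import numpy as np
import tensorflow as tf

# Hypothetical 2x2 single-channel 16-bit image.
img = np.array([[0, 255], [1024, 1279]], dtype=np.uint16).reshape(2, 2, 1)
encoded = tf.image.encode_png(tf.constant(img))          # uint16 input -> 16-bit PNG
decoded = tf.image.decode_png(encoded, dtype=tf.uint16)  # dtype must be requested explicitly
with tf.Session() as sess:
    assert (sess.run(decoded) == img).all()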
Example 2: testIntTypes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def testIntTypes(self):
  for dtype, nptype in [
      (tf.int32, np.int32),
      (tf.uint8, np.uint8),
      (tf.uint16, np.uint16),
      (tf.int16, np.int16),
      (tf.int8, np.int8)]:
    # Test with array.
    t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
    self.assertEquals(dtype, t.dtype)
    self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(nptype, a.dtype)
    self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
    # Test with ndarray.
    t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
    self.assertEquals(dtype, t.dtype)
    self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
    a = tensor_util.MakeNdarray(t)
    self.assertEquals(nptype, a.dtype)
    self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
Example 3: setUp
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def setUp(self):
  super(TPUEncodeTest, self).setUp()
  self.data = (
      # Supported on TPU
      tf.random.uniform([128], maxval=100000, dtype=tf.int32),
      # Not supported on TPU
      tf.cast(
          tf.random.uniform([128], maxval=65535, dtype=tf.int32), tf.uint16),
      # Not supported on TPU
      tf.cast(
          tf.random.uniform([64, 84, 84, 4], maxval=256, dtype=tf.int32),
          tf.uint8),
      # Not supported on TPU
      tf.cast(tf.random.uniform([1], maxval=256, dtype=tf.int32), tf.uint8),
      # Not supported on TPU
      tf.cast(
          tf.random.uniform([100, 128, 1, 1, 1], maxval=256, dtype=tf.int32),
          tf.uint8),
      # Not supported on TPU
      tf.cast(
          tf.random.uniform([128, 100, 1, 1, 1], maxval=256, dtype=tf.int32),
          tf.uint8),
  )
Example 4: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def __init__(self, gray, isInstance=False):
    self._sess = tf.Session()
    self.isGrayscale = gray
    self._png_data = tf.placeholder(dtype=tf.string)
    if isInstance:
        self._isInstance = True
        self._image = tf.placeholder(dtype=tf.uint8)
        self._decode_png = tf.image.decode_png(self._png_data, channels=0, dtype=tf.uint16)
        self._decode_png = tf.image.resize_images(tf.cast(self._decode_png, tf.float32), size=[256, 512])
        self._encode_png = tf.image.encode_png(self._image)
    elif self.isGrayscale:
        self._image = tf.placeholder(dtype=tf.uint8)
        self._decode_png = tf.image.decode_png(self._png_data, channels=0)
        self._decode_png = tf.image.resize_images(self._decode_png, size=[256, 512])
        self._encode_png = tf.image.encode_png(self._image)
    else:
        self._image = tf.placeholder(dtype=tf.uint8)
        self._decode_png = tf.image.decode_png(self._png_data, channels=3)
        self._decode_png = tf.image.resize_images(self._decode_png, size=[256, 512])
        self._encode_png = tf.image.encode_png(self._image)
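The constructor above only builds the graph; running it means feeding raw PNG bytes through the placeholder. A hypothetical helper on the same class (not shown in the excerpt) could look like this:

def decode(self, png_path):
    # Hypothetical method: feed raw PNG bytes through the decode graph built
    # in __init__ and return the resized float array.
    with open(png_path, 'rb') as f:
        png_bytes = f.read()
    return self._sess.run(self._decode_png, feed_dict={self._png_data: png_bytes})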
Example 5: load_tensorflow_image
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def load_tensorflow_image(self, channel_label: str,
                          image_name: str) -> lt.LabeledTensor:
  # All images will be cropped to this size.
  crop_size = 1024
  filename_op = tf.train.string_input_producer([self.data_path(image_name)])
  wfr = tf.WholeFileReader()
  _, encoded_png_op = wfr.read(filename_op)
  image_op = tf.image.decode_png(
      tf.reshape(encoded_png_op, shape=[]), channels=1, dtype=tf.uint16)
  image_op = image_op[:crop_size, :crop_size, :]
  image_op = tf.to_float(image_op) / np.iinfo(np.uint16).max
  image_op = tf.reshape(image_op, [1, 1024, 1024, 1])
  return lt.LabeledTensor(
      image_op, ['batch', 'row', 'column', ('channel', [channel_label])])
Example 6: test__dtype_to_bytes
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def test__dtype_to_bytes():
    np_tf_dt = [
        (np.uint8, tf.uint8, b"uint8"),
        (np.uint16, tf.uint16, b"uint16"),
        (np.uint32, tf.uint32, b"uint32"),
        (np.uint64, tf.uint64, b"uint64"),
        (np.int8, tf.int8, b"int8"),
        (np.int16, tf.int16, b"int16"),
        (np.int32, tf.int32, b"int32"),
        (np.int64, tf.int64, b"int64"),
        (np.float16, tf.float16, b"float16"),
        (np.float32, tf.float32, b"float32"),
        (np.float64, tf.float64, b"float64"),
    ]
    for npd, tfd, dt in np_tf_dt:
        npd = np.dtype(npd)
        assert tfrecord._dtype_to_bytes(npd) == dt
        assert tfrecord._dtype_to_bytes(tfd) == dt

    assert tfrecord._dtype_to_bytes("float32") == b"float32"
    assert tfrecord._dtype_to_bytes("foobar") == b"foobar"
Example 7: _convert_string_dtype
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
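A short usage sketch of the helper above:

assert _convert_string_dtype('uint16') == tf.uint16
assert _convert_string_dtype('float32') == tf.float32
# Unknown strings raise ValueError('Unsupported dtype:', dtype).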
Example 8: reduce_mean_support_empty
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def reduce_mean_support_empty(input, keepdims=False):
    return tf.cond(tf.size(input) > 0,
                   lambda: tf.reduce_mean(input, keepdims=keepdims),
                   lambda: tf.zeros_like(input))

# def bit_tensor_list(input):
#     assert input.dtype in [tf.uint8, tf.uint16, tf.uint32, tf.uint64], 'unsupported data type, must be uint*'
#     num_bits = 0
#     if input.dtype == tf.int8:
#         num_bits = 8
#     elif input.dtype == tf.int16:
#         num_bits = 16
#     elif input.dtype == tf.uint32:
#         num_bits = 32
#     elif input.dtype == tf.uint64:
#         num_bits = 64
#     bit_tensors = []
#     for i in range(num_bits):
#         current_bit = 1 << i
#         current_bit_tensor = tf.bitwise.bitwise_and(input, current_bit) == 1
#         bit_tensors.append(current_bit_tensor)
#     print(bit_tensors)
#     return bit_tensors
Example 9: read_and_decode_distillation
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def read_and_decode_distillation(self, filename_queue):
    img1_name = tf.string_join([self.img_dir, '/', filename_queue[0]])
    img2_name = tf.string_join([self.img_dir, '/', filename_queue[1]])
    img1 = tf.image.decode_png(tf.read_file(img1_name), channels=3)
    img1 = tf.cast(img1, tf.float32)
    img2 = tf.image.decode_png(tf.read_file(img2_name), channels=3)
    img2 = tf.cast(img2, tf.float32)

    flow_occ_fw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_fw_', filename_queue[2], '.png'])
    flow_occ_bw_name = tf.string_join([self.fake_flow_occ_dir, '/flow_occ_bw_', filename_queue[2], '.png'])
    flow_occ_fw = tf.image.decode_png(tf.read_file(flow_occ_fw_name), dtype=tf.uint16, channels=3)
    flow_occ_fw = tf.cast(flow_occ_fw, tf.float32)
    flow_occ_bw = tf.image.decode_png(tf.read_file(flow_occ_bw_name), dtype=tf.uint16, channels=3)
    flow_occ_bw = tf.cast(flow_occ_bw, tf.float32)
    flow_fw, occ_fw = self.extract_flow_and_mask(flow_occ_fw)
    flow_bw, occ_bw = self.extract_flow_and_mask(flow_occ_bw)
    return img1, img2, flow_fw, flow_bw, occ_fw, occ_bw
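extract_flow_and_mask is not included in this excerpt. For reference, a plausible sketch following the KITTI convention for flow stored in 16-bit PNGs (u/v scaled by 64 and offset by 2^15 in the first two channels, validity mask in the third) might look like this; it is an assumption, not the repo's actual implementation:

def extract_flow_and_mask(self, flow_occ):
    # flow_occ: float32 tensor decoded from a uint16 PNG, channels = [u, v, valid].
    flow = (flow_occ[:, :, :2] - 2 ** 15) / 64.0   # undo the 16-bit KITTI encoding
    mask = flow_occ[:, :, 2:3]                     # 1 where the flow is valid
    return flow, mask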
Example 10: _load_sample
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def _load_sample(self, files):
    left_file_name = files[0]
    right_file_name = files[1]
    gt_file_name = files[2]

    # read rgb images
    left_image = read_image_from_disc(left_file_name)
    right_image = read_image_from_disc(right_file_name)

    # read gt
    if self._usePfm:
        gt_image = tf.py_func(lambda x: readPFM(x)[0], [gt_file_name], tf.float32)
        gt_image.set_shape([None, None, 1])
    else:
        read_type = tf.uint16 if self._double_prec_gt else tf.uint8
        gt_image = read_image_from_disc(gt_file_name, shape=[None, None, 1], dtype=read_type)
        gt_image = tf.cast(gt_image, tf.float32)
        if self._double_prec_gt:
            gt_image = gt_image / 256.0

    # crop gt to fit with image (SGM adds some padding, who knows why...)
    gt_image = gt_image[:, :tf.shape(left_image)[1], :]

    if self._resize_shape[0] is not None:
        scale_factor = tf.cast(tf.shape(gt_image)[1], tf.float32) / float(self._resize_shape[1])
        left_image = preprocessing.rescale_image(left_image, self._resize_shape)
        right_image = preprocessing.rescale_image(right_image, self._resize_shape)
        gt_image = tf.image.resize_nearest_neighbor(tf.expand_dims(gt_image, axis=0), self._resize_shape)[0] / scale_factor

    if self._crop_shape[0] is not None:
        if self._is_training:
            left_image, right_image, gt_image = preprocessing.random_crop(self._crop_shape, [left_image, right_image, gt_image])
        else:
            (left_image, right_image, gt_image) = [
                tf.image.resize_image_with_crop_or_pad(x, self._crop_shape[0], self._crop_shape[1])
                for x in [left_image, right_image, gt_image]
            ]

    if self._augment:
        left_image, right_image = preprocessing.augment(left_image, right_image)

    return [left_image, right_image, gt_image]
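read_image_from_disc is a repo helper that is not included in these excerpts. A stand-in with the signature implied by the calls above (hypothetical, not the original implementation) could be:

def read_image_from_disc(image_path, shape=(None, None, 3), dtype=tf.uint8):
    image_raw = tf.read_file(image_path)
    # decode_png is used here because dtype=tf.uint16 is only honoured by the PNG decoder.
    image = tf.image.decode_png(image_raw, dtype=dtype)
    image.set_shape(shape)
    return image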
Example 11: _build_input_pipeline
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def _build_input_pipeline(self):
    left_files, right_files, gt_files, _ = read_list_file(self._path_file)
    self._couples = [[l, r, gt] for l, r, gt in zip(left_files, right_files, gt_files)]

    # flags
    self._usePfm = gt_files[0].endswith('pfm') or gt_files[0].endswith('PFM')
    if not self._usePfm:
        gg = cv2.imread(gt_files[0], -1)
        self._double_prec_gt = (gg.dtype == np.uint16)

    print('Input file loaded, starting to build input pipelines')
    print('FLAGS:')
    print('_usePfmGt', self._usePfm)
    print('_double_prec_gt', self._double_prec_gt)

    # create dataset
    dataset = tf.data.Dataset.from_tensor_slices(self._couples).repeat(self._num_epochs)
    if self._shuffle:
        dataset = dataset.shuffle(self._batch_size * 50)

    # load images
    dataset = dataset.map(self._load_sample)

    # transform data
    dataset = dataset.batch(self._batch_size, drop_remainder=True)
    dataset = dataset.prefetch(buffer_size=30)

    # get iterator and batches
    iterator = dataset.make_one_shot_iterator()
    images = iterator.get_next()
    self._left_batch = images[0]
    self._right_batch = images[1]
    self._gt_batch = images[2]

################# PUBLIC METHOD #######################
Example 12: _decode_gt
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def _decode_gt(self, gt):
    if self._usePfm:
        gt_image_op = tf.py_func(lambda x: read_PFM(x)[0], [gt], tf.float32)
        gt_image_op.set_shape([None, None, 1])
    else:
        read_type = tf.uint16 if self._double_prec_gt else tf.uint8
        gt_image_op = read_image_from_disc(gt, shape=[None, None, 1], dtype=read_type)
        gt_image_op = tf.cast(gt_image_op, tf.float32)
        if self._double_prec_gt:
            gt_image_op = gt_image_op / 256.0
    return gt_image_op
Example 13: args_check
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def args_check(cls, node, **kwargs):
    supported_dtype = [
        tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.uint16,
        tf.int8, tf.int16, tf.int32, tf.int64, tf.complex64, tf.complex128
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    if x.dtype not in supported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT(
            "CumSum input in " + str(x.dtype) + " which", "Tensorflow")
Example 14: args_check
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def args_check(cls, node, **kwargs):
    unsupported_dtype = [
        tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
    ]
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]
    if x.dtype in unsupported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                        "Tensorflow")
    if y.dtype in unsupported_dtype:
        exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                        "Tensorflow")
Example 15: _common
# Required module: import tensorflow [as alias]
# Or: from tensorflow import uint16 [as alias]
def _common(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    x_dtype = x.dtype

    if cls.SINCE_VERSION < 11:
        # min/max were required and passed as attributes
        clip_value_min = node.attrs.get("min", tf.reduce_min(x))
        clip_value_max = node.attrs.get("max", tf.reduce_max(x))
    else:
        # min/max are optional and passed as inputs
        clip_value_min = tensor_dict[node.inputs[1]] if len(
            node.inputs) > 1 and node.inputs[1] != "" else x_dtype.min
        clip_value_max = tensor_dict[node.inputs[2]] if len(
            node.inputs) > 2 and node.inputs[2] != "" else x_dtype.max

    # tf.clip_by_value doesn't support uint8, uint16, uint32, int8 and int16
    # dtype for x, therefore need to upcast it to tf.int32 or tf.int64
    if x_dtype in [tf.uint8, tf.uint16, tf.uint32, tf.int8, tf.int16]:
        cast_to = tf.int64 if x_dtype == tf.uint32 else tf.int32
        x = tf.cast(x, cast_to)
        clip_value_min = tf.cast(clip_value_min, cast_to)
        clip_value_max = tf.cast(clip_value_max, cast_to)
        y = tf.clip_by_value(x, clip_value_min, clip_value_max)
        y = tf.cast(y, x_dtype)
    else:
        y = tf.clip_by_value(x, clip_value_min, clip_value_max)

    return [y]
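Applied outside the ONNX handler, the same upcast/clip/downcast pattern for a uint16 tensor looks like this (a minimal sketch with made-up values):

x = tf.constant([0, 500, 40000], dtype=tf.uint16)
# tf.clip_by_value does not accept uint16, so clip in int32 and cast back.
y = tf.cast(tf.clip_by_value(tf.cast(x, tf.int32), 10, 5000), tf.uint16)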