This page collects typical usage examples of Python's tensorflow.substr method. If you are wondering what tensorflow.substr does, how to call it, or what it looks like in practice, the curated examples below should help. You can also browse further usage examples of the parent module, tensorflow.
The following shows 15 code examples of tensorflow.substr, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
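Before the examples, a minimal sketch of the op itself may help orient readers. This uses the TF 1.x API shown throughout this page (in TF 2.x the same op is exposed as tf.strings.substr); the tensor values are illustrative:

import tensorflow as tf

# tf.substr(input, pos, len) takes `len` bytes starting at byte offset `pos`
# from each string in `input`; scalar pos/len broadcast over the whole tensor.
s = tf.constant([b"Hello", b"World"])
out = tf.substr(s, 1, 3)  # -> [b"ell", b"orl"]
with tf.Session() as sess:
    print(sess.run(out))  # [b'ell' b'orl']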
Example 1: _testElementWisePosLen
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _testElementWisePosLen(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array([[1, 2, 3],
                         [1, 2, 3],
                         [1, 2, 3]], dtype)
    length = np.array([[2, 3, 4],
                       [4, 3, 2],
                       [5, 5, 5]], dtype)
    expected_value = [[b"en", b"eve", b"lve"],
                      [b"hirt", b"urt", b"te"],
                      [b"ixtee", b"vente", b"hteen"]]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
        substr = substr_op.eval()
        self.assertAllEqual(substr, expected_value)
Example 2: _testMismatchPosLenShapes
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _testMismatchPosLenShapes(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array([[1, 2, 3]], dtype)
    length = np.array([2, 3, 4], dtype)
    # Should fail: position/length have different rank
    with self.assertRaises(ValueError):
        substr_op = tf.substr(test_string, position, length)

    position = np.array([[1, 2, 3],
                         [1, 2, 3],
                         [1, 2, 3]], dtype)
    length = np.array([[2, 3, 4]], dtype)
    # Should fail: position/length have mismatched dimensions
    with self.assertRaises(ValueError):
        substr_op = tf.substr(test_string, position, length)
Example 3: _read_flow
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])
    value_flow = tf.substr(value, 12, 8 * width * height)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)
    flow = tf.reshape(flow, [height, width, 2])
    mask = tf.to_float(tf.logical_and(flow[:, :, 0] < 1e9, flow[:, :, 1] < 1e9))
    mask = tf.reshape(mask, [height, width, 1])
    return flow, mask
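The hard-coded offsets above match the Middlebury .flo layout: a 4-byte magic tag, width and height as little-endian int32 at byte offsets 4 and 8, and height * width * 2 float32 values starting at offset 12 (hence 8 bytes per pixel). As a sanity check, here is a hedged pure-Python sketch of the same parse; the file path is hypothetical:

import struct
import numpy as np

with open("flow.flo", "rb") as f:                    # hypothetical path
    assert f.read(4) == b"PIEH"                      # .flo magic tag
    width, height = struct.unpack("<ii", f.read(8))  # int32s at offsets 4 and 8
    raw = f.read(8 * width * height)                 # float32 pairs from offset 12
flow = np.frombuffer(raw, dtype=np.float32).reshape(height, width, 2)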
Example 4: _read_flow
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _read_flow(filenames, num_epochs=None):
    """Given a list of filenames, constructs a reader op for ground truth flow files."""
    filename_queue = tf.train.string_input_producer(filenames,
        shuffle=False, capacity=len(filenames), num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    value = tf.reshape(value, [1])
    value_width = tf.substr(value, 4, 4)
    value_height = tf.substr(value, 8, 4)
    width = tf.reshape(tf.decode_raw(value_width, out_type=tf.int32), [])
    height = tf.reshape(tf.decode_raw(value_height, out_type=tf.int32), [])
    value_flow = tf.substr(value, 12, 8 * 436 * 1024)
    flow = tf.decode_raw(value_flow, out_type=tf.float32)
    return tf.reshape(flow, [436, 1024, 2])
Example 5: read_semantic_gt
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def read_semantic_gt(self, image_path):
    # tf.decode_image does not return the image size; this is an ugly workaround to handle both jpeg and png
    path_length = string_length_tf(image_path)[0]
    file_extension = tf.substr(image_path, path_length - 3, 3)
    file_cond = tf.equal(file_extension, 'png')
    image = tf.cond(file_cond,
                    lambda: tf.image.decode_png(tf.read_file(image_path)),
                    lambda: tf.zeros([self.params.height, self.params.width, 1], tf.uint8))
    # if the dataset is cityscapes, we crop the last fifth to remove the car hood
    if self.dataset == 'cityscapes':
        o_height = tf.shape(image)[0]
        crop_height = (o_height * 4) // 5
        image = image[:crop_height, :, :]
    image = tf.to_int32(tf.image.resize_images(image, [self.params.height, self.params.width],
                                               tf.image.ResizeMethod.NEAREST_NEIGHBOR))
    valid = tf.cond(file_cond,
                    lambda: tf.ones([self.params.height, self.params.width, 1], tf.float32),
                    lambda: tf.zeros([self.params.height, self.params.width, 1], tf.float32))
    return image, valid
Example 6: read_image
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def read_image(self, image_path):
    # tf.decode_image does not return the image size; this is an ugly workaround to handle both jpeg and png
    path_length = string_length_tf(image_path)[0]
    file_extension = tf.substr(image_path, path_length - 3, 3)
    file_cond = tf.equal(file_extension, 'jpg')
    image = tf.cond(file_cond,
                    lambda: tf.image.decode_jpeg(tf.read_file(image_path)),
                    lambda: tf.image.decode_png(tf.read_file(image_path)))
    # if the dataset is cityscapes, we crop the last fifth to remove the car hood
    if self.dataset == 'cityscapes':
        o_height = tf.shape(image)[0]
        crop_height = (o_height * 4) // 5
        image = image[:crop_height, :, :]
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [self.params.height, self.params.width],
                                   tf.image.ResizeMethod.AREA)
    return image
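For reference, the extension check in the last two examples just slices the final three characters of the path. A minimal sketch, with string_length_tf (a helper from that codebase) approximated here via tf.string_split; the path is illustrative:

path = tf.constant("data/image_0001.jpg")             # illustrative path
length = tf.size(tf.string_split([path], "").values)  # character count
ext = tf.substr(path, length - 3, 3)                  # -> b'jpg'
is_jpg = tf.equal(ext, 'jpg')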
Example 7: __init__
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def __init__(self, config, batch_size, one_hot=False):
    self.lookup = None
    reader = tf.TextLineReader()
    filename_queue = tf.train.string_input_producer(["chargan.txt"])
    key, x = reader.read(filename_queue)
    vocabulary = self.get_vocabulary()
    table = tf.contrib.lookup.string_to_index_table_from_tensor(
        mapping=vocabulary, default_value=0)
    # Pad each line with spaces, then truncate to exactly 64 characters
    x = tf.string_join([x, tf.constant(" " * 64)])
    x = tf.substr(x, [0], [64])
    x = tf.string_split(x, delimiter='')
    x = tf.sparse_tensor_to_dense(x, default_value=' ')
    x = tf.reshape(x, [64])
    x = table.lookup(x)
    self.one_hot = one_hot
    if one_hot:
        x = tf.one_hot(x, len(vocabulary))
        x = tf.cast(x, dtype=tf.float32)
        x = tf.reshape(x, [1, int(x.get_shape()[0]), int(x.get_shape()[1]), 1])
    else:
        x = tf.cast(x, dtype=tf.float32)
        x -= len(vocabulary) / 2.0
        x /= len(vocabulary) / 2.0
        x = tf.reshape(x, [1, 1, 64, 1])
    num_preprocess_threads = 8
    x = tf.train.shuffle_batch(
        [x],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=5000,
        min_after_dequeue=500,
        enqueue_many=True)
    self.x = x
    self.table = table
Example 8: tf_startswith
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def tf_startswith(tensor, prefix, axis=None):
    return tf.reduce_all(tf.equal(tf.substr(tensor, 0, len(prefix)), prefix), axis=axis)

# --------------------------------------------------------------------------
# File readers and writers
# --------------------------------------------------------------------------
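A short usage sketch of tf_startswith with illustrative inputs (note that with axis=None it reduces to a single boolean over the whole tensor):

names = tf.constant([b"img_001.png", b"txt_notes.md"])
tf_startswith(names, b"img_")              # -> False: reduced over all elements
tf.equal(tf.substr(names, 0, 4), b"img_")  # -> [True, False], the element-wise check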
Example 9: _testScalarString
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _testScalarString(self, dtype):
    test_string = b"Hello"
    position = np.array(1, dtype)
    length = np.array(3, dtype)
    expected_value = b"ell"
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
        substr = substr_op.eval()
        self.assertAllEqual(substr, expected_value)
Example 10: _testVectorStrings
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _testVectorStrings(self, dtype):
    test_string = [b"Hello", b"World"]
    position = np.array(1, dtype)
    length = np.array(3, dtype)
    expected_value = [b"ell", b"orl"]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
        substr = substr_op.eval()
        self.assertAllEqual(substr, expected_value)
Example 11: _testMatrixStrings
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _testMatrixStrings(self, dtype):
    test_string = [[b"ten", b"eleven", b"twelve"],
                   [b"thirteen", b"fourteen", b"fifteen"],
                   [b"sixteen", b"seventeen", b"eighteen"]]
    position = np.array(1, dtype)
    length = np.array(4, dtype)
    expected_value = [[b"en", b"leve", b"welv"],
                      [b"hirt", b"ourt", b"ifte"],
                      [b"ixte", b"even", b"ight"]]
    substr_op = tf.substr(test_string, position, length)
    with self.test_session():
        substr = substr_op.eval()
        self.assertAllEqual(substr, expected_value)
Example 12: testWrongDtype
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def testWrongDtype(self):
    with self.test_session():
        with self.assertRaises(TypeError):
            tf.substr(b"test", 3.0, 1)
        with self.assertRaises(TypeError):
            tf.substr(b"test", 3, 1.0)
Example 13: _load_corpus
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def _load_corpus(self, corpus_dir):
    for fd in range(2, -1, -1):
        file_list = []
        if fd == 0:
            file_dir = os.path.join(corpus_dir, AUG0_FOLDER)
        elif fd == 1:
            file_dir = os.path.join(corpus_dir, AUG1_FOLDER)
        else:
            file_dir = os.path.join(corpus_dir, AUG2_FOLDER)

        for data_file in sorted(os.listdir(file_dir)):
            full_path_name = os.path.join(file_dir, data_file)
            if os.path.isfile(full_path_name) and data_file.lower().endswith('.txt'):
                file_list.append(full_path_name)

        assert len(file_list) > 0

        dataset = tf.data.TextLineDataset(file_list)
        # Keep only 'Q:'/'A:' lines and strip the two-character prefix
        src_dataset = dataset.filter(
            lambda line: tf.logical_and(tf.size(line) > 0,
                                        tf.equal(tf.substr(line, 0, 2), tf.constant('Q:'))))
        src_dataset = src_dataset.map(lambda line: tf.substr(line, 2, MAX_LEN)).prefetch(4096)
        tgt_dataset = dataset.filter(
            lambda line: tf.logical_and(tf.size(line) > 0,
                                        tf.equal(tf.substr(line, 0, 2), tf.constant('A:'))))
        tgt_dataset = tgt_dataset.map(lambda line: tf.substr(line, 2, MAX_LEN)).prefetch(4096)

        src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
        if fd == 1:
            src_tgt_dataset = src_tgt_dataset.repeat(self.hparams.aug1_repeat_times)
        elif fd == 2:
            src_tgt_dataset = src_tgt_dataset.repeat(self.hparams.aug2_repeat_times)

        if self.text_set is None:
            self.text_set = src_tgt_dataset
        else:
            self.text_set = self.text_set.concatenate(src_tgt_dataset)
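To see what the Q:/A: filtering above does in isolation, here is a hedged toy version on an in-memory dataset (TF 1.x iterator API; the sample lines are made up):

lines = tf.data.Dataset.from_tensor_slices(
    [b"Q: hello", b"A: hi there", b"# comment", b"Q: bye", b"A: see you"])
questions = lines.filter(
    lambda line: tf.equal(tf.substr(line, 0, 2), tf.constant("Q:")))
questions = questions.map(lambda line: tf.substr(line, 2, 100))  # strip prefix; length clips
next_q = questions.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(next_q))  # b' hello'
    print(sess.run(next_q))  # b' bye'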
Example 14: create_char_vectors_from_post
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def create_char_vectors_from_post(self, raw_post, mxlen):
    char2index = self.index
    if self.do_lowercase:
        raw_post = self.lowercase(raw_post)
    raw_post = tf.string_split(tf.reshape(raw_post, [-1]))
    # Truncate each word token to at most mxwlen characters before char-splitting
    culled_word_token_vals = tf.substr(raw_post.values, 0, self.mxwlen)
    char_tokens = tf.string_split(culled_word_token_vals, delimiter='')
    char_indices = char2index.lookup(char_tokens)
    return self.reshape_indices(char_indices, [mxlen, self.mxwlen])
Example 15: generate_subword_feat
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import substr [as alias]
def generate_subword_feat(sentence,
                          subword_vocab_index,
                          word_max_length,
                          subword_max_length,
                          subword_size,
                          word_sos,
                          word_eos,
                          word_placeholder_enable,
                          subword_pad):
    """generate subword feature for sentence"""
    def word_to_subword(word):
        """generate subwords for word"""
        word_len = tf.size(tf.string_split([word], delimiter=''))
        subwords = tf.substr([word], 0, subword_size)
        for i in range(1, subword_max_length):
            subwords = tf.cond(i + subword_size - 1 < word_len,
                lambda: tf.concat([subwords, tf.substr([word], i, subword_size)], 0),
                lambda: subwords)
        subwords = tf.concat([subwords[:subword_max_length],
                              tf.constant(subword_pad, shape=[subword_max_length])], axis=0)
        subwords = tf.reshape(subwords[:subword_max_length], shape=[subword_max_length])
        return subwords

    words = tf.string_split([sentence], delimiter=' ').values
    if word_placeholder_enable:
        words = tf.concat([[word_sos], words[:word_max_length], [word_eos],
                           tf.constant(subword_pad, shape=[word_max_length])], axis=0)
        word_max_length = word_max_length + 2
    else:
        words = tf.concat([words[:word_max_length],
                           tf.constant(subword_pad, shape=[word_max_length])], axis=0)
    words = tf.reshape(words[:word_max_length], shape=[word_max_length])

    word_subwords = tf.map_fn(word_to_subword, words)
    word_subwords = tf.cast(subword_vocab_index.lookup(word_subwords), dtype=tf.int32)
    return word_subwords
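To make the sliding-window logic of word_to_subword above concrete, here is an equivalent pure-Python sketch (the n-gram extraction only; parameter values are illustrative and the TF-side length clipping is approximated):

def word_to_subword_py(word, subword_size, subword_max_length, pad):
    # One character n-gram per start offset, as in the tf.substr loop above;
    # at least one (possibly truncated) subword is always emitted.
    n = min(subword_max_length, max(1, len(word) - subword_size + 1))
    subwords = [word[i:i + subword_size] for i in range(n)]
    return subwords + [pad] * (subword_max_length - n)

print(word_to_subword_py("fifteen", 3, 4, "<pad>"))  # ['fif', 'ift', 'fte', 'tee']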