This article collects typical usage code examples for the Python method tensorflow.string_to_number. If you have been wondering how tensorflow.string_to_number is used in practice, what to pass it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module.
Below, 15 code examples of tensorflow.string_to_number are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
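Before the examples, a minimal sketch of the op itself may be useful (TF 1.x API, matching the examples below; the input strings are invented for illustration):

import tensorflow as tf

# tf.string_to_number parses string tensors into numeric tensors.
as_float = tf.string_to_number(["1.5", "-2", "3e2"])           # default out_type=tf.float32
as_int = tf.string_to_number(["42", "-7"], out_type=tf.int32)

with tf.Session() as sess:
    print(sess.run(as_float))  # [  1.5   -2.  300.]
    print(sess.run(as_int))    # [42 -7]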
Example 1: decode
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def decode(self, data, items):
    """Decodes the data to return the tensors specified by the list of
    items.

    Args:
        data: The scalar data to decode.
        items: A list of strings, each of which is the name of the resulting
            tensors to retrieve.

    Returns:
        A list of tensors, each of which corresponds to each item.
    """
    data = tf.reshape(data, shape=[])
    if data.dtype is tf.string:
        decoded_data = tf.string_to_number(data, out_type=self._dtype)
    else:
        decoded_data = tf.cast(data, self._dtype)
    outputs = {
        self._data_name: decoded_data
    }
    return [outputs[item] for item in items]
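The conversion at the heart of this method can be exercised on its own; a minimal sketch (the constant value is invented):

# Reshape to a scalar, then parse the string into the target dtype.
data = tf.constant("7")
decoded = tf.string_to_number(tf.reshape(data, shape=[]), out_type=tf.int32)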
Example 2: _imagenet_load_file
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def _imagenet_load_file(path, epochs=None, shuffle=True, seed=0, subset='train', prepare_path=True):
    IMAGENET_ROOT = os.environ.get('IMAGENET_DIR', '')
    if not isinstance(path, list):
        path = [path]
    filename_queue = tf.train.string_input_producer(path,
                                                    num_epochs=epochs, shuffle=shuffle, seed=seed)
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)
    image_path, label_str = tf.decode_csv(value, record_defaults=[[''], ['']], field_delim=' ')

    if prepare_path:
        image_abspath = IMAGENET_ROOT + '/images/' + subset + image_path
    else:
        image_abspath = image_path

    image_content = tf.read_file(image_abspath)
    image = decode_image(image_content, channels=3)
    image.set_shape([None, None, 3])

    imgshape = tf.shape(image)[:2]
    label = tf.string_to_number(label_str, out_type=tf.int32)

    return image, label, imgshape, image_path
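A note on running queue-based pipelines like this one: in TF 1.x, string_input_producer only yields data after queue runners are started. A hypothetical driver (the list file name is invented; decode_image is a helper from the original project, and IMAGENET_DIR is assumed to be set):

image, label, imgshape, image_path = _imagenet_load_file("train_list.txt")
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # epoch counters live in local variables
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    img, lbl = sess.run([image, label])
    coord.request_stop()
    coord.join(threads)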
Example 3: testToInt32
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def testToInt32(self):
    with self.test_session():
        input_string = tf.placeholder(tf.string)
        output = tf.string_to_number(
            input_string,
            out_type=tf.int32)

        result = output.eval(feed_dict={
            input_string: ["0", "3", "-1", " -10", "-2147483648", "2147483647"]
        })

        self.assertAllEqual([0, 3, -1, -10, -2147483648, 2147483647], result)

        with self.assertRaisesOpError(_ERROR_MESSAGE + "2.9"):
            output.eval(feed_dict={input_string: ["2.9"]})

        # The next two exceed maximum value of int32.
        for in_string in ["-2147483649", "2147483648"]:
            with self.assertRaisesOpError(_ERROR_MESSAGE + in_string):
                output.eval(feed_dict={input_string: [in_string]})
Example 4: read_image_and_label
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def read_image_and_label(image_label_q):
    # Returns three Tensors: the decoded PNG image, the hour, and the minute.
    filename, hour_str, minute_str = tf.decode_csv(
        image_label_q.dequeue(), [[""], [""], [""]], " ")
    file_contents = tf.read_file(filename)

    # Decode image from PNG, and cast it to a float.
    example = tf.image.decode_png(file_contents, channels=image_channels)
    image = tf.cast(example, tf.float32)

    # Set the tensor size manually from the image.
    image.set_shape([image_size, image_size, image_channels])

    # Do per-image whitening (zero mean, unit standard deviation). Without this,
    # the learning algorithm diverges almost immediately because the gradient is
    # too big.
    image = tf.image.per_image_whitening(image)

    # The label should be an integer.
    hour = tf.string_to_number(hour_str, out_type=tf.int32)
    minute = tf.string_to_number(minute_str, out_type=tf.int32)
    return image, hour, minute
Example 5: create_trg_dataset
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def create_trg_dataset(input_dataset,
                       input_data_type,
                       word_vocab_index,
                       word_max_length,
                       word_pad,
                       word_sos,
                       word_eos,
                       word_placeholder_enable,
                       num_parallel):
    """create dataset for input target data"""
    dataset = input_dataset

    if input_data_type == "span":
        dataset = dataset.map(lambda span: tf.string_split([span], delimiter='|').values,
            num_parallel_calls=num_parallel)
        dataset = dataset.map(lambda span: tf.string_to_number(span, out_type=tf.int32),
            num_parallel_calls=num_parallel)
        dataset = dataset.map(lambda span: tf.expand_dims(span, axis=-1),
            num_parallel_calls=num_parallel)
    elif input_data_type == "text":
        dataset = dataset.map(lambda sent: generate_word_feat(sent,
            word_vocab_index, word_max_length, word_pad, word_sos, word_eos,
            word_placeholder_enable), num_parallel_calls=num_parallel)

    return dataset
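For the "span" branch above, a minimal usage sketch (input strings invented; the word_* arguments are unused on this path):

ds = tf.data.Dataset.from_tensor_slices(["3|7", "1|4|9"])
ds = create_trg_dataset(ds, "span", None, None, None, None, None, False, num_parallel=4)
# Each element is now an int32 tensor of shape [span_length, 1].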
Example 6: test_dataset_map
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def test_dataset_map(self, input_queue):
    fname_1, fname_2, annotation_fname, samples_per_cat = (
        input_queue[0], input_queue[1], input_queue[2], input_queue[3])
    samples_per_cat = tf.string_to_number(samples_per_cat)

    file_content = tf.read_file(fname_1)
    image_1 = tf.image.decode_jpeg(file_content, channels=3)
    image_1 = self.preprocess_image(image_1)

    file_content = tf.read_file(fname_2)
    image_2 = tf.image.decode_jpeg(file_content, channels=3)
    image_2 = self.preprocess_image(image_2)

    file_content = tf.read_file(annotation_fname)
    seg_1 = tf.image.decode_jpeg(file_content, channels=1)
    seg_1 = self.preprocess_mask(seg_1)

    # Cropping preprocess
    image_1 = self.central_cropping(image_1, self.test_crop)
    image_2 = self.central_cropping(image_2, self.test_crop)
    seg_1 = self.central_cropping(seg_1, self.test_crop)

    return image_1, image_2, seg_1, fname_1, samples_per_cat
Example 7: generator
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def generator(ln):
    splits = tf.string_split([ln], delimiter=',')
    label = splits.values[0]
    # Parse the dense features.
    features = {}
    for i in range(1, 14):
        features['I' + str(i)] = tf.string_to_number(splits.values[i], tf.int64)
    return features, label
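A hypothetical way to wire this parser into an input pipeline (the file name is invented; each line is assumed to hold a label followed by 13 integer features, as the loop above expects):

dataset = tf.data.TextLineDataset("train.csv").map(generator)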
Example 8: _get_dataset_next
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_builder.read_dataset(tf.data.TextLineDataset, files,
                                           config)
    dataset = dataset.map(decode_func)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example 9: _get_dataset_next
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def _get_dataset_next(self, files, config, batch_size):

    def decode_func(value):
        return [tf.string_to_number(value, out_type=tf.int32)]

    dataset = dataset_util.read_dataset(
        tf.data.TextLineDataset, decode_func, files, config)
    dataset = dataset.batch(batch_size)
    return dataset.make_one_shot_iterator().get_next()
Example 10: read
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def read(self, **data):
    return {k: tf.string_to_number(v, tf.float32) for k, v in data.items()}
Example 11: test_StringToNumber
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def test_StringToNumber(self):
    t = tf.string_to_number(list("0123456789"))
    self.check(t)

#
# shapes and shaping
#
Example 12: input_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def input_fn(filenames, batch_size=32, num_epochs=1, perform_shuffle=False):
    print('Parsing', filenames)

    def decode_libsvm(line):
        columns = tf.string_split([line], ' ')
        labels = tf.string_to_number(columns.values[0], out_type=tf.float32)
        splits = tf.string_split(columns.values[1:], ':')
        id_vals = tf.reshape(splits.values, splits.dense_shape)
        feat_ids, feat_vals = tf.split(id_vals, num_or_size_splits=2, axis=1)
        feat_ids = tf.string_to_number(feat_ids, out_type=tf.int32)
        feat_vals = tf.string_to_number(feat_vals, out_type=tf.float32)
        return {"feat_ids": feat_ids, "feat_vals": feat_vals}, labels

    # Extract lines from input files using the Dataset API; accepts a single
    # filename or a list of filenames.
    dataset = tf.data.TextLineDataset(filenames).map(decode_libsvm, num_parallel_calls=10).prefetch(1000)

    # Randomize input using a window of 256 elements (read into memory).
    if perform_shuffle:
        dataset = dataset.shuffle(buffer_size=256)

    dataset = dataset.repeat(num_epochs)  # repeat for the requested number of epochs
    dataset = dataset.batch(batch_size)   # batch size to use

    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels
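A hypothetical call, pulling one batch from a libSVM-format file (the file name is invented):

features, labels = input_fn(["train.libsvm"], batch_size=8)
with tf.Session() as sess:
    batch_features, batch_labels = sess.run([features, labels])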
Example 13: postproc_annotation
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def postproc_annotation(self, ann_filename, ann):
    id_str = tf.string_split([ann_filename], ':').values[1]
    id_ = tf.string_to_number(id_str, out_type=tf.int32)
    ann_postproc = tf.cast(tf.equal(tf.cast(ann, tf.int32), id_), tf.uint8)
    return ann_postproc
Example 14: testToFloat
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def testToFloat(self):
    with self.test_session():
        input_string = tf.placeholder(tf.string)
        output = tf.string_to_number(
            input_string,
            out_type=tf.float32)

        result = output.eval(feed_dict={
            input_string: ["0",
                           "3",
                           "-1",
                           "1.12",
                           "0xF",
                           " -10.5",
                           "3.40282e+38",
                           # The next two exceed maximum value for float, so we
                           # expect +/-INF to be returned instead.
                           "3.40283e+38",
                           "-3.40283e+38",
                           "NAN",
                           "INF"]
        })

        self.assertAllClose([0, 3, -1, 1.12, 0xF, -10.5, 3.40282e+38,
                             float("INF"), float("-INF"), float("NAN"),
                             float("INF")], result)

        with self.assertRaisesOpError(_ERROR_MESSAGE + "10foobar"):
            output.eval(feed_dict={input_string: ["10foobar"]})
Example 15: provide_data
# Required import: import tensorflow [as alias]
# Or: from tensorflow import string_to_number [as alias]
def provide_data(self):
    def decode(line):
        fields = tf.string_split([line], self.field_delim).values
        if self.index:  # Skip index
            fields = fields[1:]
        fields = tf.regex_replace(fields, "|".join(self.na_values), "nan")
        fields = tf.string_to_number(fields, tf.float32)
        return fields

    def fill_na(fields, fill_values):
        fields = tf.where(tf.is_nan(fields), fill_values, fields)
        return fields

    dataset = tf.data.TextLineDataset(self.local_data_file)
    if self.header:  # Skip header
        dataset = dataset.skip(1)
    dataset = (
        dataset.map(decode)
        .map(lambda x: fill_na(x, self.data_schema.field_defaults))
        .repeat()
        .batch(self.batch_size)
    )
    iterator = dataset.make_one_shot_iterator()
    batch = iterator.get_next()
    batch = tf.reshape(batch, [self.batch_size, self.data_schema.field_num])
    return batch
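A hypothetical way to consume this provider (the instance construction is assumed, not shown in the original):

batch = provider.provide_data()
with tf.Session() as sess:
    values = sess.run(batch)  # shape: (batch_size, field_num)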