This page collects typical usage examples of the tensorflow.VarLenFeature method in Python. If you are unsure how exactly to use tensorflow.VarLenFeature, or what it is useful for, the curated code samples below may help. You can also explore further usage examples of the module it belongs to, tensorflow.
Fifteen code examples of tensorflow.VarLenFeature are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
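Before the examples, a quick orientation: tf.VarLenFeature(dtype) declares a feature whose number of values may differ from example to example, and parsing it with tf.parse_example or tf.parse_single_example yields a tf.SparseTensor rather than a dense Tensor. The minimal sketch below is written against the TensorFlow 1.x API used throughout these examples; the feature name "tokens" and its values are made up for illustration.

import tensorflow as tf  # TensorFlow 1.x, matching the examples on this page

# Build a toy serialized tf.Example with a variable-length int64 field "tokens".
example = tf.train.Example(features=tf.train.Features(feature={
    "tokens": tf.train.Feature(int64_list=tf.train.Int64List(value=[3, 1, 4, 1, 5])),
}))

# VarLenFeature declares a field whose length may vary between examples;
# parsing it produces a tf.SparseTensor instead of a dense Tensor.
parsed = tf.parse_single_example(
    example.SerializeToString(),
    features={"tokens": tf.VarLenFeature(tf.int64)})

with tf.Session() as sess:
    print(sess.run(parsed["tokens"]).values)  # -> [3 1 4 1 5]

Reading the SparseTensor's .values, or densifying it with tf.sparse_tensor_to_dense, is the usual next step, as several of the examples below demonstrate.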
Example 1: test_schema_txt_to_feature_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def test_schema_txt_to_feature_spec(self):
    schema_txt = """
        feature {
          name: "test_feature"
          value_count {
            min: 1
            max: 1
          }
          type: FLOAT
          presence {
            min_count: 1
          }
        }
        """.encode("utf-8")
    with NamedTemporaryFile() as f:
        f.write(schema_txt)
        f.flush()
        os.fsync(f)
        feature_spec = schema_txt_file_to_feature_spec(f.name)
        self.assertEqual(feature_spec, {"test_feature": tf.VarLenFeature(dtype=tf.float32)})
Example 2: _write_test_data
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def _write_test_data():
    schema = feature_spec_to_schema({"f0": tf.VarLenFeature(dtype=tf.int64),
                                     "f1": tf.VarLenFeature(dtype=tf.int64),
                                     "f2": tf.VarLenFeature(dtype=tf.int64)})
    batches = [
        [1, 4, None],
        [2, None, None],
        [3, 5, None],
        [None, None, None],
    ]
    example_proto = [example_pb2.Example(features=feature_pb2.Features(feature={
        "f" + str(i): feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[f]))
        for i, f in enumerate(batch) if f is not None
    })) for batch in batches]
    return DataUtil.write_test_data(example_proto, schema)
Example 3: _extract_features_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def _extract_features_batch(self, serialized_batch):
    features = tf.parse_example(
        serialized_batch,
        features={'images': tf.FixedLenFeature([], tf.string),
                  'imagepaths': tf.FixedLenFeature([], tf.string),
                  'labels': tf.VarLenFeature(tf.int64),
                  })
    bs = features['images'].shape[0]
    images = tf.decode_raw(features['images'], tf.uint8)
    w, h = tuple(CFG.ARCH.INPUT_SIZE)
    images = tf.cast(x=images, dtype=tf.float32)
    # images = tf.subtract(tf.divide(images, 128.0), 1.0)
    images = tf.reshape(images, [bs, h, -1, CFG.ARCH.INPUT_CHANNELS])
    labels = features['labels']
    labels = tf.cast(labels, tf.int32)
    imagepaths = features['imagepaths']
    return images, labels, imagepaths
Author: Mingtzge, Project: 2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement, Lines: 23, Source file: read_tfrecord.py
Example 4: prepare_serialized_examples
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def prepare_serialized_examples(self, serialized_examples):
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes))
    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)
    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)
    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example 5: _make_schema
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def _make_schema(columns, types, default_values):
    """Input schema definition.

    Args:
      columns: column names for fields appearing in input.
      types: column types for fields appearing in input.
      default_values: default values for fields appearing in input.

    Returns:
      feature_set dictionary of string to *Feature.
    """
    result = {}
    assert len(columns) == len(types)
    assert len(columns) == len(default_values)
    for c, t, v in zip(columns, types, default_values):
        if isinstance(t, list):
            result[c] = tf.VarLenFeature(dtype=t[0])
        else:
            result[c] = tf.FixedLenFeature(shape=[], dtype=t, default_value=v)
    return dataset_schema.from_feature_spec(result)
Example 6: prepare_reader
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def prepare_reader(self, filename_queue, batch_size=1024):
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
        "length of feature_names (={}) != length of feature_sizes (={})".format(
            len(self.feature_names), len(self.feature_sizes))
    feature_map = {"video_id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
        feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
            [self.feature_sizes[feature_index]], tf.float32)
    features = tf.parse_example(serialized_examples, features=feature_map)
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)
    return features["video_id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
Example 7: example_parser
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_parser(example_serialized):
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''),
        'image/timestamp': tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
        'steer/angle': tf.FixedLenFeature([2], dtype=tf.float32, default_value=[0.0, 0.0]),
        'steer/timestamp': tf.FixedLenFeature([2], dtype=tf.int64, default_value=[-1, -1]),
        # 'gps/lat': tf.FixedLenFeature([2], dtype=tf.float32, default_value=[0.0, 0.0]),
        # 'gps/long': tf.FixedLenFeature([2], dtype=tf.float32, default_value=[0.0, 0.0]),
        # 'gps/timestamp': tf.VarLenFeature(tf.int64),
    }
    features = tf.parse_single_example(example_serialized, feature_map)
    image_timestamp = tf.cast(features['image/timestamp'], dtype=tf.int64)
    steering_angles = features['steer/angle']
    steering_timestamps = features['steer/timestamp']
    return features['image/encoded'], image_timestamp, steering_angles, steering_timestamps
Example 8: load_tfrecord
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def load_tfrecord(self):
    opts = self._options
    file_names = glob(opts.train_dir + '/output.tfrecord')
    file_queue = tf.train.string_input_producer(file_names,
                                                num_epochs=opts.epochs_to_train)
    reader = tf.TFRecordReader()
    _, record_string = reader.read(file_queue)
    features = {'sentence': tf.VarLenFeature(tf.int64)}
    one_line_example = tf.parse_single_example(record_string, features=features)
    capacity = PRELOAD_LINES
    batch_lines = tf.train.batch(one_line_example,
                                 batch_size=BATCH_LINES,
                                 capacity=capacity,
                                 num_threads=opts.io_threads)
    corpus_slice = batch_lines['sentence'].values
    return corpus_slice
Example 9: parse_example_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def parse_example_batch(serialized):
    """Parses a batch of tf.Example protos.

    Args:
      serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.

    Returns:
      encode: A SentenceBatch of encode sentences.
      decode_pre: A SentenceBatch of "previous" sentences to decode.
      decode_post: A SentenceBatch of "post" sentences to decode.
    """
    features = tf.parse_example(
        serialized,
        features={
            "encode": tf.VarLenFeature(dtype=tf.int64),
            "decode_pre": tf.VarLenFeature(dtype=tf.int64),
            "decode_post": tf.VarLenFeature(dtype=tf.int64),
        })

    def _sparse_to_batch(sparse):
        ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
        mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                                  tf.ones_like(sparse.values, dtype=tf.int32))
        return SentenceBatch(ids=ids, mask=mask)

    output_names = ("encode", "decode_pre", "decode_post")
    return tuple(_sparse_to_batch(features[x]) for x in output_names)
Example 10: _count_matrix_input
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })
    global_row = features['global_row']
    global_col = features['global_col']
    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values
    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])
    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)
    return global_row, global_col, count
Example 11: example_reading_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_reading_spec(self):
    data_fields = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.VarLenFeature(tf.int64),
        "floats": tf.VarLenFeature(tf.float32),
    }
    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)
Example 12: example_reading_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_reading_spec(self):
    label_key = "image/unpadded_label"
    data_fields, data_items_to_decoders = (
        super(ImageFSNS, self).example_reading_spec())
    data_fields[label_key] = tf.VarLenFeature(tf.int64)
    data_items_to_decoders[
        "targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
    return data_fields, data_items_to_decoders
Example 13: example_reading_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_reading_spec(self):
    data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}

    if self.has_inputs:
        data_fields["inputs"] = tf.VarLenFeature(tf.int64)

    # hack: ignoring true targets and putting dist_targets in targets
    data_items_to_decoders = {
        "inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
        "targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
    }
    return (data_fields, data_items_to_decoders)
Example 14: example_reading_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_reading_spec(self):
    data_fields = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.VarLenFeature(tf.int64),
        "section_boundaries": tf.VarLenFeature(tf.int64),
    }
    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)
Example 15: example_reading_spec
# Required import: import tensorflow [as alias]
# Or: from tensorflow import VarLenFeature [as alias]
def example_reading_spec(self):
    data_fields = {
        "inputs": tf.VarLenFeature(tf.float32),
        "targets": tf.VarLenFeature(tf.float32),
    }
    data_items_to_decoders = None
    return (data_fields, data_items_to_decoders)