This page collects typical usage examples of the Python method tensorflow.compat.v1.Example. If you are wondering how v1.Example is used in practice, the curated examples below may help. You can also explore further usage of the containing module, tensorflow.compat.v1.
The following 15 code examples of v1.Example are shown, ordered by popularity.
Example 1: to_example
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def to_example(dictionary):
  """Helper: build tf.Example from (string -> int/float/str list) dictionary."""
  features = {}
  for (k, v) in six.iteritems(dictionary):
    if not v:
      raise ValueError("Empty generated field: %s" % str((k, v)))
    # Subtle PY2 vs PY3 difference: map objects are not subscriptable in PY3,
    # so indexing v[0] below would fail with TypeError unless the map is
    # first converted to a list.
    if six.PY3 and isinstance(v, map):
      v = list(v)
    if (isinstance(v[0], six.integer_types) or
        np.issubdtype(type(v[0]), np.integer)):
      features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
    elif isinstance(v[0], float):
      features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v))
    elif isinstance(v[0], six.string_types):
      if not six.PY2:  # Convert to bytes in Python 3.
        v = [bytes(x, "utf-8") for x in v]
      features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
    elif isinstance(v[0], bytes):
      features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v))
    else:
      raise ValueError("Value for %s is not a recognized type; v: %s type: %s" %
                       (k, str(v[0]), str(type(v[0]))))
  return tf.train.Example(features=tf.train.Features(feature=features))
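A minimal usage sketch (the feature names and the output path are illustrative assumptions; assumes six, numpy as np, and tensorflow.compat.v1 as tf are imported):
# Hypothetical driver for to_example; keys, values, and the path are
# illustrative only.
example = to_example({
    "input_ids": [101, 2054, 102],  # ints -> Int64List
    "scores": [0.25, 0.75],         # floats -> FloatList
    "label": ["positive"],          # strings -> BytesList (utf-8 in PY3)
})
with tf.python_io.TFRecordWriter("/tmp/examples.tfrecord") as writer:
  writer.write(example.SerializeToString())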
Example 2: serving_input_receiver_fn
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def serving_input_receiver_fn():
  """Creates an input function for serving."""
  seq_len = FLAGS.max_seq_length
  serialized_example = tf.placeholder(
      dtype=tf.string, shape=[None], name="serialized_example")
  features = {
      "input_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "input_mask": tf.FixedLenFeature([seq_len], dtype=tf.int64),
      "segment_ids": tf.FixedLenFeature([seq_len], dtype=tf.int64),
  }
  feature_map = tf.parse_example(serialized_example, features=features)
  feature_map["is_real_example"] = tf.constant(1, dtype=tf.int32)
  feature_map["label_ids"] = tf.constant(0, dtype=tf.int32)
  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in feature_map.keys():
    t = feature_map[name]
    if t.dtype == tf.int64:
      t = tf.to_int32(t)
    feature_map[name] = t
  return tf.estimator.export.ServingInputReceiver(
      features=feature_map, receiver_tensors=serialized_example)
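A receiver fn like this is typically handed to an Estimator export call; a sketch, where the `estimator` object and the export directory are assumptions, not part of the original snippet:
# Hypothetical export call; `estimator` and the directory are illustrative.
estimator.export_saved_model(
    export_dir_base="/tmp/exported_model",
    serving_input_receiver_fn=serving_input_receiver_fn)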
Example 3: process_feature
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def process_feature(self, feature):
  """Writes an InputFeature to the TFRecordWriter as a tf.train.Example."""
  self.num_features += 1

  def create_int_feature(values):
    feature = tf.train.Feature(
        int64_list=tf.train.Int64List(value=list(values)))
    return feature

  features = collections.OrderedDict()
  features["unique_ids"] = create_int_feature([feature.unique_id])
  features["input_ids"] = create_int_feature(feature.input_ids)
  features["input_mask"] = create_int_feature(feature.input_mask)
  features["segment_ids"] = create_int_feature(feature.segment_ids)
  if self.is_training:
    features["start_positions"] = create_int_feature([feature.start_position])
    features["end_positions"] = create_int_feature([feature.end_position])
    impossible = 0
    if feature.is_impossible:
      impossible = 1
    features["is_impossible"] = create_int_feature([impossible])
  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  self._writer.write(tf_example.SerializeToString())
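The surrounding writer class is not shown on this page; a minimal sketch of what such a host class might look like (the class name and constructor are assumptions, modeled on BERT's FeatureWriter):
import collections
import tensorflow.compat.v1 as tf

class FeatureWriter(object):
  """Hypothetical host class for process_feature; names are illustrative."""

  def __init__(self, filename, is_training):
    self.filename = filename
    self.is_training = is_training
    self.num_features = 0
    self._writer = tf.python_io.TFRecordWriter(filename)

  def close(self):
    self._writer.close()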
Example 4: file_based_convert_examples_to_features
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file):
  """Converts a set of `InputExample`s to a TFRecord file."""
  writer = tf.python_io.TFRecordWriter(output_file)
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer)

    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    features["label_ids"] = create_int_feature([feature.label_id])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()  # Close the writer so the records are flushed to disk.
Example 5: process_feature
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def process_feature(self, feature):
  """Writes an InputFeature to the TFRecordWriter as a tf.train.Example."""
  self.num_features += 1

  def create_int_feature(values):
    feature = tf.train.Feature(
        int64_list=tf.train.Int64List(value=list(values)))
    return feature

  features = collections.OrderedDict()
  features["unique_ids"] = create_int_feature([feature.unique_id])
  features["input_ids"] = create_int_feature(feature.input_ids)
  features["input_mask"] = create_int_feature(feature.input_mask)
  features["segment_ids"] = create_int_feature(feature.segment_ids)
  if self.is_training:
    features["label_ids"] = create_int_feature([feature.label_id])
  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  self._writer.write(tf_example.SerializeToString())
Example 6: convert_single_example
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def convert_single_example(example, rand_example, max_seq_length, tokenizer):
  """Converts a single `InputExample` into a single `InputFeatures`."""
  # Add padding examples here
  example_type = collections.namedtuple(
      "Example", ["input_ids", "input_mask", "segment_ids", "labels"])
  labels = range(8)  # inconsequential
  rand_sents = rand_example[:8]
  target_sents = example[:4] + example[5:] + rand_sents
  bert_input = create_cpc_input_from_text(
      tokenizer,
      example[4],
      target_sents,
      labels,
      group_size=16,
      max_seq_length=max_seq_length)
  feature = example_type(bert_input.tokens, bert_input.mask, bert_input.seg_ids,
                         labels)
  return feature
Example 7: construct_pipeline
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def construct_pipeline(pipeline, input_tfrecord, output_tfrecord, model_dir,
                       confidence_threshold, num_shards):
  """Returns a Beam pipeline to run object detection inference.

  Args:
    pipeline: Initialized beam pipeline.
    input_tfrecord: A TFRecord of tf.train.Example protos containing images.
    output_tfrecord: A TFRecord of tf.train.Example protos that contain images
      in the input TFRecord and the detections from the model.
    model_dir: Path to `saved_model` to use for inference.
    confidence_threshold: Threshold to use when keeping detection results.
    num_shards: The number of output shards.
  """
  input_collection = (
      pipeline | 'ReadInputTFRecord' >> beam.io.tfrecordio.ReadFromTFRecord(
          input_tfrecord,
          coder=beam.coders.BytesCoder()))
  output_collection = input_collection | 'RunInference' >> beam.ParDo(
      GenerateDetectionDataFn(model_dir, confidence_threshold))
  output_collection = output_collection | 'Reshuffle' >> beam.Reshuffle()
  _ = output_collection | 'WritetoDisk' >> beam.io.tfrecordio.WriteToTFRecord(
      output_tfrecord,
      num_shards=num_shards,
      coder=beam.coders.ProtoCoder(tf.train.Example))
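A sketch of driving this pipeline end to end (all paths, the threshold, and the runner options are assumptions):
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions

# Hypothetical driver; every argument value here is illustrative.
with beam.Pipeline(options=PipelineOptions()) as p:
  construct_pipeline(
      pipeline=p,
      input_tfrecord='/tmp/images.tfrecord',
      output_tfrecord='/tmp/detections.tfrecord',
      model_dir='/tmp/saved_model',
      confidence_threshold=0.5,
      num_shards=10)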
Example 8: tfrecord_iterator
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def tfrecord_iterator(filenames, gzipped=False, example_spec=None):
  """Yields records from TFRecord files.

  Args:
    filenames: list<str>, list of TFRecord filenames to read from.
    gzipped: bool, whether the TFRecord files are gzip-encoded.
    example_spec: dict<str feature name, tf.VarLenFeature/tf.FixedLenFeature>,
      if provided, will parse each record as a tensorflow.Example proto.

  Yields:
    Records (or parsed Examples, if example_spec is provided) from files.
  """
  with tf.Graph().as_default():
    dataset = tf.data.Dataset.from_tensor_slices(filenames)

    def _load_records(filename):
      return tf.data.TFRecordDataset(
          filename,
          compression_type=tf.constant("GZIP") if gzipped else None,
          buffer_size=16 * 1000 * 1000)

    dataset = dataset.flat_map(_load_records)

    def _parse_example(ex_ser):
      return tf.parse_single_example(ex_ser, example_spec)

    if example_spec:
      dataset = dataset.map(_parse_example, num_parallel_calls=32)
    dataset = dataset.prefetch(100)
    record_it = dataset.make_one_shot_iterator().get_next()

    with tf.Session() as sess:
      while True:
        try:
          ex = sess.run(record_it)
          yield ex
        except tf.errors.OutOfRangeError:
          break
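A usage sketch (the file name, feature name, and shape are assumptions):
# Hypothetical iteration over parsed records; spec values are illustrative.
spec = {"input_ids": tf.FixedLenFeature([128], dtype=tf.int64)}
for parsed in tfrecord_iterator(["/tmp/data.tfrecord"], example_spec=spec):
  print(parsed["input_ids"].shape)  # numpy array of shape (128,)
  break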
Example 9: _decode_record
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def _decode_record(record, name_to_features):
  """Decodes a record to a TensorFlow example."""
  example = tf.parse_single_example(record, name_to_features)
  # tf.Example only supports tf.int64, but the TPU only supports tf.int32.
  # So cast all int64 to int32.
  for name in list(example.keys()):
    t = example[name]
    if t.dtype == tf.int64:
      t = tf.cast(t, tf.int32)
    example[name] = t
  return example
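In BERT-style input pipelines a helper like this usually feeds a tf.data map; a sketch (the file name and feature spec are assumptions):
# Hypothetical pipeline around _decode_record; names are illustrative.
name_to_features = {
    "input_ids": tf.FixedLenFeature([128], tf.int64),
    "label_ids": tf.FixedLenFeature([], tf.int64),
}
dataset = tf.data.TFRecordDataset("/tmp/train.tfrecord")
dataset = dataset.map(lambda record: _decode_record(record, name_to_features))
dataset = dataset.batch(32)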
Example 10: classification_convert_examples_to_features
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def classification_convert_examples_to_features(
    examples, max_seq_length, batch_size, encoder, output_file, labels,
    pad_extra_examples=False, chop_from_front_if_needed=True):
  """Converts a set of `InputExample`s to a TFRecord file."""
  writer = tf.python_io.TFRecordWriter(output_file)
  label_map = {label: i for i, label in enumerate(labels)}
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    # begin_summary is our [CLS] token
    tokens = example['ids'] + [encoder.begin_summary]
    if len(tokens) > max_seq_length:
      if chop_from_front_if_needed:
        tokens = tokens[-max_seq_length:]
      else:
        tokens = example['ids'][:(max_seq_length - 1)] + [encoder.begin_summary]
    elif len(tokens) < max_seq_length:
      tokens.extend([encoder.padding] * (max_seq_length - len(tokens)))
    features = collections.OrderedDict()
    features['input_ids'] = tf.train.Feature(
        int64_list=tf.train.Int64List(value=tokens))
    features['label_ids'] = tf.train.Feature(
        int64_list=tf.train.Int64List(value=[label_map[example['label']]]))
    features['is_real_example'] = tf.train.Feature(
        int64_list=tf.train.Int64List(value=[1]))
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  if pad_extra_examples:
    for x in range(len(examples) % batch_size):
      features = collections.OrderedDict()
      features['input_ids'] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[0] * max_seq_length))
      features['label_ids'] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[0]))
      features['is_real_example'] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=[0]))
      tf_example = tf.train.Example(features=tf.train.Features(feature=features))
      writer.write(tf_example.SerializeToString())
  writer.close()
Example 11: file_based_convert_examples_to_features
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def file_based_convert_examples_to_features(
    examples, label_list, max_seq_length, tokenizer, output_file, task_name):
  """Converts a set of `InputExample`s to a TFRecord file."""
  writer = tf.python_io.TFRecordWriter(output_file)
  for (ex_index, example) in enumerate(examples):
    if ex_index % 10000 == 0:
      tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
    feature = convert_single_example(ex_index, example, label_list,
                                     max_seq_length, tokenizer, task_name)

    def create_int_feature(values):
      f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
      return f

    def create_float_feature(values):
      f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
      return f

    features = collections.OrderedDict()
    features["input_ids"] = create_int_feature(feature.input_ids)
    features["input_mask"] = create_int_feature(feature.input_mask)
    features["segment_ids"] = create_int_feature(feature.segment_ids)
    # STS-B is a regression task, so its label is stored as a float feature;
    # all other tasks use an int64 label.
    features["label_ids"] = (create_float_feature([feature.label_id])
                             if task_name == "sts-b"
                             else create_int_feature([feature.label_id]))
    features["is_real_example"] = create_int_feature(
        [int(feature.is_real_example)])
    tf_example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(tf_example.SerializeToString())
  writer.close()
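On the reading side, the parsing spec has to mirror the float/int split for label_ids; a sketch (the helper name and shapes are assumptions):
# Hypothetical parsing spec matching the writer above; shapes are illustrative.
def get_name_to_features(task_name, max_seq_length):
  label_type = tf.float32 if task_name == "sts-b" else tf.int64
  return {
      "input_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
      "input_mask": tf.FixedLenFeature([max_seq_length], tf.int64),
      "segment_ids": tf.FixedLenFeature([max_seq_length], tf.int64),
      "label_ids": tf.FixedLenFeature([], label_type),
      "is_real_example": tf.FixedLenFeature([], tf.int64),
  }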
Example 12: process_feature
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def process_feature(self, feature):
  """Writes an InputFeature to the TFRecordWriter as a tf.train.Example."""
  self.num_features += 1

  def create_int_feature(values):
    feature = tf.train.Feature(
        int64_list=tf.train.Int64List(value=list(values)))
    return feature

  features = collections.OrderedDict()
  features["unique_ids"] = create_int_feature([feature.unique_id])
  features["input_ids"] = create_int_feature(feature.input_ids)
  features["input_mask"] = create_int_feature(feature.input_mask)
  features["segment_ids"] = create_int_feature(feature.segment_ids)
  features["p_mask"] = create_int_feature(feature.p_mask)
  if self.is_training:
    features["start_positions"] = create_int_feature([feature.start_position])
    features["end_positions"] = create_int_feature([feature.end_position])
    impossible = 0
    if feature.is_impossible:
      impossible = 1
    features["is_impossible"] = create_int_feature([impossible])
  tf_example = tf.train.Example(features=tf.train.Features(feature=features))
  self._writer.write(tf_example.SerializeToString())
Example 13: get_example
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def get_example(self, batch_size):
  """Get a single example from the tfrecord file.

  Args:
    batch_size: Int, minibatch size.

  Returns:
    tf.Example protobuf parsed from tfrecord.
  """
  reader = tf.TFRecordReader()
  num_epochs = None if self.is_training else 1
  capacity = batch_size
  path_queue = tf.train.input_producer(
      [self.record_path],
      num_epochs=num_epochs,
      shuffle=self.is_training,
      capacity=capacity)
  unused_key, serialized_example = reader.read(path_queue)
  features = {
      "note_str": tf.FixedLenFeature([], dtype=tf.string),
      "pitch": tf.FixedLenFeature([1], dtype=tf.int64),
      "velocity": tf.FixedLenFeature([1], dtype=tf.int64),
      "audio": tf.FixedLenFeature([64000], dtype=tf.float32),
      "qualities": tf.FixedLenFeature([10], dtype=tf.int64),
      "instrument_source": tf.FixedLenFeature([1], dtype=tf.int64),
      "instrument_family": tf.FixedLenFeature([1], dtype=tf.int64),
  }
  example = tf.parse_single_example(serialized_example, features)
  return example
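Because this uses the old queue-based input path, the returned tensors only yield values after queue runners are started; a driver sketch (the reader object and batch size are assumptions):
# Hypothetical session setup for the queue-based reader above.
example = reader.get_example(batch_size=8)  # `reader` is an assumed instance
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())  # num_epochs uses a local var
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  ex = sess.run(example)  # dict of numpy values, e.g. ex["pitch"]
  coord.request_stop()
  coord.join(threads)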
Example 14: create_metaexample_spec
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def create_metaexample_spec(
    model_spec,
    num_samples_per_task,
    prefix):
  """Converts a model feature/label spec into a MetaExample spec.

  Args:
    model_spec: The base model tensor spec.
    num_samples_per_task: Number of episodes in the task.
    prefix: The tf.Example feature column name prefix.

  Returns:
    A TSpecStructure. For each spec in model_spec, the output contains
    num_samples_per_task corresponding specs stored as: "<name>/i".
  """
  model_spec = utils.flatten_spec_structure(model_spec)
  meta_example_spec = TSpecStructure()
  for key in model_spec.keys():
    for i in range(num_samples_per_task):
      spec = model_spec[key]
      name_prefix = '{:s}_ep{:d}'.format(prefix, i)
      new_name = name_prefix + '/' + six.ensure_str(spec.name)
      meta_example_spec[key + '/{:}'.format(i)] = (
          utils.ExtendedTensorSpec.from_spec(
              spec, name=new_name))
  return meta_example_spec
Example 15: get_feature_specification
# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Example [as alias]
def get_feature_specification(self, mode):
  """Required features for the model_fn/model_inference_fn.

  Note, the model_fn might use additional features for debugging/development
  purposes. The create_export_outputs_fn will however only require the
  specified required features. Only this subset of features will be used to
  generate automatic tf.Example extractors and numpy placeholders for the
  serving models.

  Args:
    mode: The mode for feature specifications.
  """