This article collects typical usage examples of the Python method tensorflow.compat.v1.Examples. If you are wondering what v1.Examples does, how to use it, and where it appears in real code, the curated examples below may help. You can also explore further usage of its containing module, tensorflow.compat.v1.
Four code examples of v1.Examples are shown below, ordered by popularity by default.
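All four examples below operate on serialized tf.Example protos. For orientation, here is a minimal, self-contained sketch of building and serializing one (the feature names 'text' and 'label' are placeholders):

import tensorflow.compat.v1 as tf

# Build a tf.train.Example with one bytes feature and one int64 feature.
example = tf.train.Example(features=tf.train.Features(feature={
    'text': tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[b'hello world'])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[7])),
}))
serialized = example.SerializeToString()  # bytes, ready for a TFRecord file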
Example 1: serialized_to_parsed
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Examples [as alias]
def serialized_to_parsed(dataset,
                         feature_tspec,
                         label_tspec,
                         num_parallel_calls=2):
  """Auto-generates tf.Example parsing code from feature and label tensor specs.

  Supports both single-tf.Example parsing (the default) and batched parsing
  (e.g. when pulling batches from a replay buffer).

  Args:
    dataset: tf.data.Dataset whose outputs are Dict[dataset_key, serialized
      tf.Examples].
    feature_tspec: Collection of TensorSpecs designating how to extract
      features.
    label_tspec: Collection of TensorSpecs designating how to extract labels.
    num_parallel_calls: (Optional.) A tf.int32 scalar tf.Tensor representing
      the number of elements to process in parallel. If not specified,
      elements are processed sequentially.

  Returns:
    tf.data.Dataset whose output is a single (features, labels) tuple.
  """
  parse_tf_example_fn = create_parse_tf_example_fn(
      feature_tspec=feature_tspec, label_tspec=label_tspec)
  dataset = dataset.map(
      map_func=parse_tf_example_fn, num_parallel_calls=num_parallel_calls)
  return dataset
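A minimal usage sketch (hypothetical: the spec keys, shapes, and the dict key 'train' are assumptions, and create_parse_tf_example_fn must come from the same module as serialized_to_parsed):

import tensorflow.compat.v1 as tf

# Hypothetical specs; the real keys and shapes depend on how the
# tf.Examples were written.
feature_tspec = {'image': tf.TensorSpec(shape=(64, 64, 3), dtype=tf.uint8)}
label_tspec = {'label': tf.TensorSpec(shape=(), dtype=tf.int64)}

# The docstring above expects dict-valued elements keyed by dataset name.
dataset = tf.data.TFRecordDataset(['/tmp/train.tfrecord'])
dataset = dataset.map(lambda serialized: {'train': serialized})
dataset = serialized_to_parsed(dataset, feature_tspec, label_tspec,
                               num_parallel_calls=4)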
Example 2: count_preprocessing_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Examples [as alias]
def count_preprocessing_fn(text_key, language_code_key):
  """Generates a preprocessing function to be used in generating word counts.

  Args:
    text_key: feature key in tf.Example for the text.
    language_code_key: feature key in tf.Example for the language_code.

  Returns:
    A preprocessing function.
  """
  def preprocessing_fn(inputs):
    """Transforms the dataset using TF Transform.

    Tokenizes the input and detects the language if there is no associated
    language_code.

    Args:
      inputs: dataset of tf.Examples containing text samples.

    Returns:
      Transformed outputs.
    """
    outputs = {}
    tokenizer = BertTokenizer()
    tokens = tokenizer.tokenize(inputs[text_key])
    outputs['tokens'] = tokens.to_sparse()
    outputs['lang'] = tf.convert_to_tensor(inputs[language_code_key])
    return outputs

  return preprocessing_fn
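A hedged usage sketch: BertTokenizer here is whatever tokenizer class the surrounding module defines (its constructor arguments are unknown), and the feature keys 'text' and 'language_code' are placeholders:

import tensorflow.compat.v1 as tf

preprocessing_fn = count_preprocessing_fn('text', 'language_code')
inputs = {
    'text': tf.constant(['the quick brown fox', 'hello world']),
    'language_code': tf.constant(['en', 'en']),
}
outputs = preprocessing_fn(inputs)
# outputs['tokens'] is a SparseTensor of tokens per input string;
# outputs['lang'] carries the language codes through unchanged.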
Example 3: word_count
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Examples [as alias]
def word_count(input_path, output_path, raw_metadata, min_token_frequency=2):
  """Returns a pipeline counting words and writing the output.

  Args:
    input_path: recordio file to read.
    output_path: path in which to write the output.
    raw_metadata: metadata of the input tf.Examples.
    min_token_frequency: the minimum frequency for a token to be included.
  """
  lang_set = set(FLAGS.lang_set.split(','))

  # Create the pipeline.
  pipeline = beam.Pipeline()
  with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
    converter = tft.coders.ExampleProtoCoder(
        raw_metadata.schema, serialized=False)
    # Read raw data and convert to a TF Transform encoded dict.
    raw_data = (
        pipeline
        | 'ReadInputData' >> beam.io.tfrecordio.ReadFromTFRecord(
            input_path, coder=beam.coders.ProtoCoder(tf.train.Example))
        | 'DecodeInputData' >> beam.Map(converter.decode))
    # Apply TF Transform.
    (transformed_data, _), _ = (
        (raw_data, raw_metadata)
        | 'FilterLangAndExtractToken' >> tft_beam.AnalyzeAndTransformDataset(
            utils.count_preprocessing_fn(FLAGS.text_key,
                                         FLAGS.language_code_key)))
    # Filter by language.
    tokens = (
        transformed_data
        | 'FilterByLang' >> beam.ParDo(utils.FilterTokensByLang(lang_set)))
    # Calculate smoothing coefficients.
    coeffs = (
        tokens
        | 'CalculateSmoothingCoefficients' >> beam.CombineGlobally(
            utils.CalculateCoefficients(FLAGS.smoothing_exponent)))
    # Apply smoothing, aggregate counts, and sort words by count.
    _ = (
        tokens
        | 'ApplyExponentialSmoothing' >> beam.ParDo(
            utils.ExponentialSmoothing(), beam.pvalue.AsSingleton(coeffs))
        | 'SumCounts' >> beam.CombinePerKey(sum)
        | 'FilterLowCounts' >> beam.ParDo(utils.FilterByCount(
            FLAGS.max_word_length, min_token_frequency))
        | 'MergeAndSortCounts' >> beam.CombineGlobally(utils.SortByCount())
        | 'Flatten' >> beam.FlatMap(lambda x: x)
        | 'FormatCounts' >> beam.Map(lambda tc: '%s\t%s' % (tc[0], tc[1]))
        | 'WriteSortedCount' >> beam.io.WriteToText(
            output_path, shard_name_template=''))
  return pipeline
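A sketch of driving the returned pipeline (the paths are placeholders, and raw_metadata would be a tensorflow_transform DatasetMetadata describing the input schema, whose construction is omitted here):

pipeline = word_count('/tmp/examples.tfrecord', '/tmp/word_counts.txt',
                      raw_metadata)
result = pipeline.run()
result.wait_until_finish()  # block until the Beam job completes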
Example 4: create_predict_input_fn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Examples [as alias]
def create_predict_input_fn(model_config, predict_input_config):
  """Creates a predict `input` function for `Estimator`.

  Args:
    model_config: A model_pb2.DetectionModel.
    predict_input_config: An input_reader_pb2.InputReader.

  Returns:
    `input_fn` for `Estimator` in PREDICT mode.
  """
  def _predict_input_fn(params=None):
    """Decodes serialized tf.Examples and returns `ServingInputReceiver`.

    Args:
      params: Parameter dictionary passed from the estimator.

    Returns:
      `ServingInputReceiver`.
    """
    del params
    example = tf.placeholder(dtype=tf.string, shape=[], name='tf_example')

    num_classes = config_util.get_number_of_classes(model_config)
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=False).preprocess

    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)

    transform_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=None)

    decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=False,
        num_additional_channels=predict_input_config.num_additional_channels)
    input_dict = transform_fn(decoder.decode(example))
    images = tf.cast(input_dict[fields.InputDataFields.image],
                     dtype=tf.float32)
    images = tf.expand_dims(images, axis=0)
    true_image_shape = tf.expand_dims(
        input_dict[fields.InputDataFields.true_image_shape], axis=0)

    return tf.estimator.export.ServingInputReceiver(
        features={
            fields.InputDataFields.image: images,
            fields.InputDataFields.true_image_shape: true_image_shape},
        receiver_tensors={SERVING_FED_EXAMPLE_KEY: example})

  return _predict_input_fn
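A sketch of using the returned input_fn to export a serving model (the estimator and the two config protos are assumed to come from the TF Object Detection API's usual setup; the export directory is a placeholder):

predict_input_fn = create_predict_input_fn(model_config, predict_input_config)
estimator.export_saved_model('/tmp/exported_model', predict_input_fn)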