This article collects typical usage examples of the tensorflow.contrib.tpu.RunConfig method in Python. If you are wondering how to call tpu.RunConfig, how it works, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.contrib.tpu.
The following presents 15 code examples of the tpu.RunConfig method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
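Before the examples, here is a minimal hedged sketch of the construction all of these snippets rely on. It assumes a TensorFlow 1.x installation (tensorflow.contrib was removed in TF 2.x), and the model_dir value is a placeholder:

from tensorflow.contrib import tpu as contrib_tpu

# Build a TPU-aware RunConfig; with master='' this simply runs in-process.
run_config = contrib_tpu.RunConfig(
    master='',
    model_dir='/tmp/model_dir',  # placeholder checkpoint directory
    save_checkpoints_steps=1000,
    tpu_config=contrib_tpu.TPUConfig(iterations_per_loop=100))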
Example 1: make_tpu_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def make_tpu_run_config(master, seed, model_dir, iterations_per_loop,
                        save_checkpoints_steps):
  return contrib_tpu.RunConfig(
      master=master,
      evaluation_master=master,
      model_dir=model_dir,
      save_checkpoints_steps=save_checkpoints_steps,
      cluster=None,
      tf_random_seed=seed,
      tpu_config=contrib_tpu.TPUConfig(iterations_per_loop=iterations_per_loop))
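A config built this way is typically handed straight to a TPUEstimator, as Examples 7 and 9 below also show. A hedged usage sketch (my_model_fn and the argument values are placeholders, not part of the original snippet):

# Hedged usage sketch: plugging the run config into a TPUEstimator.
run_config = make_tpu_run_config(
    master='', seed=42, model_dir='/tmp/model_dir',
    iterations_per_loop=100, save_checkpoints_steps=500)
estimator = contrib_tpu.TPUEstimator(
    model_fn=my_model_fn,  # placeholder Estimator model_fn
    use_tpu=False,
    config=run_config,
    train_batch_size=32)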
Example 2: get_tpu_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def get_tpu_run_config(self):
  """Get the TPU RunConfig for Estimator model.

  Returns:
    contrib_tpu.RunConfig() for this model.
  """
  return gin_configurable_tpu_run_config_cls(
      master=FLAGS.master, tpu_config=gin_configurable_tpu_config_cls())
Example 3: get_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def get_run_config(self):
  """Get the RunConfig for Estimator model."""
  return self._t2r_model.get_run_config()
Example 4: get_tpu_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def get_tpu_run_config(self):
  """Get the TPU RunConfig for Estimator model."""
  return self._t2r_model.get_tpu_run_config()
Example 5: get_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def get_run_config(self):
  """Get the RunConfig for Estimator model."""
Example 6: get_tpu_run_config
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def get_tpu_run_config(self):
  """Get the TPU RunConfig for Estimator model."""
Example 7: _train_and_eval_reference_model
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def _train_and_eval_reference_model(self, path, multi_dataset=False):
  model_dir = self.create_tempdir().full_path
  mock_t2r_model = mocks.MockT2RModel(
      preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
      multi_dataset=multi_dataset)

  # We create a tpu estimator for potential training.
  estimator = contrib_tpu.TPUEstimator(
      model_fn=mock_t2r_model.model_fn,
      use_tpu=mock_t2r_model.is_device_tpu,
      config=contrib_tpu.RunConfig(model_dir=model_dir),
      train_batch_size=BATCH_SIZE,
      eval_batch_size=BATCH_SIZE)

  mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE,
                                                  multi_dataset=multi_dataset)
  mock_input_generator.set_specification_from_model(
      mock_t2r_model, tf.estimator.ModeKeys.TRAIN)

  # We optimize our network.
  estimator.train(
      input_fn=mock_input_generator.create_dataset_input_fn(
          mode=tf.estimator.ModeKeys.TRAIN),
      max_steps=MAX_STEPS)

  # Verify that the serving estimator does exactly the same as the normal
  # estimator with all the parameters.
  estimator_predict = tf.estimator.Estimator(
      model_fn=mock_t2r_model.model_fn,
      config=tf.estimator.RunConfig(model_dir=model_dir))
  prediction_ref = estimator_predict.predict(
      input_fn=mock_input_generator.create_dataset_input_fn(
          mode=tf.estimator.ModeKeys.EVAL))
  return model_dir, mock_t2r_model, prediction_ref
Example 8: test_create_serving_input_receiver_numpy
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def test_create_serving_input_receiver_numpy(self):
  (model_dir, mock_t2r_model,
   prediction_ref) = self._train_and_eval_reference_model('numpy')
  exporter = default_export_generator.DefaultExportGenerator()
  exporter.set_specification_from_model(mock_t2r_model)

  # Export trained serving estimator.
  estimator_exporter = tf.estimator.Estimator(
      model_fn=mock_t2r_model.model_fn,
      config=tf.estimator.RunConfig(model_dir=model_dir))
  serving_input_receiver_fn = (
      exporter.create_serving_input_receiver_numpy_fn())
  exported_savedmodel_path = estimator_exporter.export_saved_model(
      export_dir_base=model_dir,
      serving_input_receiver_fn=serving_input_receiver_fn,
      checkpoint_path=tf.train.latest_checkpoint(model_dir))

  # Load trained and exported serving estimator, run prediction and assert
  # it is the same as before exporting.
  feed_predictor_fn = contrib_predictor.from_saved_model(
      exported_savedmodel_path)
  mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
  features, labels = mock_input_generator.create_numpy_data()
  for pos, value in enumerate(prediction_ref):
    actual = feed_predictor_fn({'x': features[pos, :].reshape(
        1, -1)})['logit'].flatten()
    predicted = value['logit'].flatten()
    np.testing.assert_almost_equal(
        actual=actual, desired=predicted, decimal=4)
    if labels[pos] > 0:
      self.assertGreater(predicted[0], 0)
    else:
      self.assertLess(predicted[0], 0)
Example 9: __init__
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def __init__(self, tokenizer, init_checkpoint):
  """Setup BERT model."""
  self.max_seq_length = FLAGS.max_hotpot_seq_length
  self.max_qry_length = FLAGS.max_hotpot_query_length
  self.batch_size = 1
  self.tokenizer = tokenizer
  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  with tf.device("/cpu:0"):
    model_fn = hotpot_model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=init_checkpoint,
        learning_rate=0.0,
        num_train_steps=0,
        num_warmup_steps=0,
        use_tpu=False,
        use_one_hot_embeddings=False)
    run_config = contrib_tpu.RunConfig()
    estimator = contrib_tpu.TPUEstimator(
        use_tpu=False,
        model_fn=model_fn,
        config=run_config,
        train_batch_size=self.batch_size,
        predict_batch_size=self.batch_size)
    self.fast_predictor = FastPredict(estimator, self.get_input_fn)
  self._PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
      "PrelimPrediction",
      ["start_index", "end_index", "start_logit", "end_logit"])
Example 10: inference_network_fn
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def inference_network_fn(self, features, labels, mode, config=None,
                         params=None):
  """The inference network implementation.

  This creates the main network based on features.
  Optionally (mode=ModeKeys.TRAIN or ModeKeys.EVAL) the model can do
  additional processing on labels, however, it has to be ensured that this is
  optional and the graph is fully operational without labels. At inference
  time we will have no access to labels. Tensors which are required for loss
  computation or debugging must be put into the inference_outputs dict.
  Having a dedicated inference_network_fn allows to compose new networks by
  using other TFModels.

  Please, use the following pattern to add not supported tpu model components
  such as tf.summary.*

  if self.use_summaries(params):
    # Do operations which are not supported on tpus.

  If your model does not support TPUs at all, please call the following
  function.

  self.raise_no_tpu_support()

  Args:
    features: This is the first item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    labels: This is the second item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
    config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
      receive what is passed to Estimator in config parameter, or the default
      config (tf.estimator.RunConfig). Allows updating things in your model_fn
      based on configuration such as num_ps_replicas, or model_dir.
    params: An optional dict of hyper parameters that will be passed into
      input_fn and model_fn. Keys are names of parameters, values are basic
      python types. There are reserved keys for TPUEstimator, including
      'batch_size'.

  Returns:
    inference_outputs: A dict with output tensors.
  """
Example 11: model_train_fn
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def model_train_fn(self, features, labels, inference_outputs, mode,
                   config=None, params=None):
  """The training model implementation.

  This model_fn should add the loss computation based on the inference_outputs
  and labels. For better debugging we also provide access to the input
  features. Note, no new variables should be generated in this model_fn since
  the model_inference_fn and the maybe_init_from_checkpoint function would
  not have access to these variables. We output the final loss (scalar) and
  a dict of optional train_outputs which might be useful for the
  model_eval_fn.

  Please, use the following pattern to add not supported tpu model components
  such as tf.summary.*

  if self.use_summaries(params):
    # Do operations which are not supported on tpus.

  If your model does not support TPUs at all, please call the following
  function.

  self.raise_no_tpu_support()

  Args:
    features: This is the first item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    labels: This is the second item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    inference_outputs: A dict containing the output tensors of
      model_inference_fn.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
    config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
      receive what is passed to Estimator in config parameter, or the default
      config (tf.estimator.RunConfig). Allows updating things in your model_fn
      based on configuration such as num_ps_replicas, or model_dir.
    params: An optional dict of hyper parameters that will be passed into
      input_fn and model_fn. Keys are names of parameters, values are basic
      python types. There are reserved keys for TPUEstimator, including
      'batch_size'.

  Returns:
    loss: The loss we will optimize.
    train_outputs: (Optional) A dict with additional tensors the training
      model generates. We output these tensors such that model_eval_fn could
      introspect these tensors.
  """
Example 12: model_eval_fn
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def model_eval_fn(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
  """The eval model implementation, by default we report the loss for eval.

  This function should add the eval_metrics computation based on the
  inference_outputs, labels and the train_loss. For better debugging we also
  provide access to the input features and the train_outputs. Note, no new
  variables should be generated in this model_fn since the model_inference_fn
  and the maybe_init_from_checkpoint function would not have access to these
  variables.

  Please, use the following pattern to add not supported tpu model components
  such as tf.summary.*

  if self.use_summaries(params):
    # Do operations which are not supported on tpus.

  If your model does not support TPUs at all, please call the following
  function.

  self.raise_no_tpu_support()

  Args:
    features: This is the first item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    labels: This is the second item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    inference_outputs: A dict containing the output tensors of
      model_inference_fn.
    train_loss: The final loss from model_train_fn.
    train_outputs: A dict containing the output tensors (dict) of
      model_train_fn.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
    config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
      receive what is passed to Estimator in config parameter, or the default
      config (tf.estimator.RunConfig). Allows updating things in your model_fn
      based on configuration such as num_ps_replicas, or model_dir.
    params: An optional dict of hyper parameters that will be passed into
      input_fn and model_fn. Keys are names of parameters, values are basic
      python types. There are reserved keys for TPUEstimator, including
      'batch_size'.

  Returns:
    eval_metrics: A tuple of (metric_fn, metric_fn_inputs) where metric_fn
      is a dict with {metric_description: tf.metrics.*}.
  """
  del features, labels, inference_outputs, train_loss, train_outputs
  del mode, config, params
  # By default we don't have any eval_metrics. The loss computation used
  # to optimize the model_fn will be reported for the model_eval_fn as well.
  # Hence, by default the EVAL mode can be used to determine the loss
  # performance on the eval dataset or even a larger train dataset.
  return None
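A subclass that does want eval metrics would return the (metric_fn, metric_fn_inputs) tuple described in the docstring; a hedged sketch with an illustrative metric name:

import tensorflow as tf  # TF 1.x

def model_eval_fn(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
  del features, labels, inference_outputs, train_outputs, mode, config, params

  def metric_fn(loss):
    # metric_fn runs on the CPU host; tf.metrics.* returns (value, update_op).
    return {'mean_loss': tf.metrics.mean(loss)}

  return metric_fn, [train_loss]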
Example 13: add_summaries
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def add_summaries(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
  """Add summaries to the graph.

  Having a central place to add all summaries to the graph is helpful in order
  to compose models. For example, if an inference_network_fn is used within
  a while loop no summaries can be added. This function will allow to add
  summaries after the while loop has been processed.

  Args:
    features: This is the first item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    labels: This is the second item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    inference_outputs: A dict containing the output tensors of
      model_inference_fn.
    train_loss: The final loss from model_train_fn.
    train_outputs: A dict containing the output tensors (dict) of
      model_train_fn.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
    config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
      receive what is passed to Estimator in config parameter, or the default
      config (tf.estimator.RunConfig). Allows updating things in your model_fn
      based on configuration such as num_ps_replicas, or model_dir.
    params: An optional dict of hyper parameters that will be passed into
      input_fn and model_fn. Keys are names of parameters, values are basic
      python types. There are reserved keys for TPUEstimator, including
      'batch_size'.
  """
  del features, labels, inference_outputs, train_loss, train_outputs, mode
  del config
  if not self.use_summaries(params):
    return
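A hedged sketch of an override that actually writes a summary once the TPU guard passes; the summary tag is an illustrative assumption:

import tensorflow as tf  # TF 1.x

def add_summaries(self, features, labels, inference_outputs, train_loss,
                  train_outputs, mode, config=None, params=None):
  del features, labels, inference_outputs, train_outputs, mode, config
  if not self.use_summaries(params):
    return
  # Summaries are only added when running off-TPU.
  tf.summary.scalar('train_loss', train_loss)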
Example 14: create_export_outputs_fn
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def create_export_outputs_fn(self, features, inference_outputs, mode,
                             config=None, params=None):
  """We export the final output used for model inference.

  This model_fn should create the optional export_outputs, see
  tf.estimator.EstimatorSpec for a more in depth description, and the
  required predictions dict. Note, the predictions dict should more often
  than not be a small subset of the inference_outputs.

  Please, use the following pattern to add not supported tpu model components
  such as tf.summary.*

  if self.use_summaries(params):
    # Do operations which are not supported on tpus.

  If your model does not support TPUs at all, please call the following
  function.

  self.raise_no_tpu_support()

  Args:
    features: This is the first item returned from the input_fn and parsed by
      tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
      requirements of the self.get_feature_specification.
    inference_outputs: A dict containing the output tensors of
      model_inference_fn.
    mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
    config: (Optional tf.estimator.RunConfig or contrib_tpu.RunConfig) Will
      receive what is passed to Estimator in config parameter, or the default
      config (tf.estimator.RunConfig). Allows updating things in your model_fn
      based on configuration such as num_ps_replicas, or model_dir.
    params: An optional dict of hyper parameters that will be passed into
      input_fn and model_fn. Keys are names of parameters, values are basic
      python types. There are reserved keys for TPUEstimator, including
      'batch_size'.

  Returns:
    predictions: A dict of tensors.
    export_outputs: (Optional) A dict containing an arbitrary name for the
      output and tf.estimator.export.PredictOutput(output_dict) as value.
      The output dict is a {name: tensor} mapping. If None, the default
      mapping for predictions is generated. The export_outputs are used
      for the serving model. Multi-headed models should have one name
      per head.
  """
  del features, mode, config, params
  # By default we will export all outputs generated by the
  # inference_network_fn.
  return inference_outputs
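If only a subset should be served, an override can also supply the explicit export_outputs mapping described in the docstring; a hedged sketch in which the logit key mirrors the mock model and the signature name is an illustrative assumption:

import tensorflow as tf  # TF 1.x

def create_export_outputs_fn(self, features, inference_outputs, mode,
                             config=None, params=None):
  del features, mode, config, params
  predictions = {'logit': inference_outputs['logit']}
  export_outputs = {
      'predict_logit': tf.estimator.export.PredictOutput(predictions)
  }
  return predictions, export_outputs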
Example 15: test_create_serving_input_receiver_tf_example
# Required import: from tensorflow.contrib import tpu [as alias]
# Or: from tensorflow.contrib.tpu import RunConfig [as alias]
def test_create_serving_input_receiver_tf_example(self, multi_dataset):
  (model_dir, mock_t2r_model,
   prediction_ref) = self._train_and_eval_reference_model(
       'tf_example', multi_dataset=multi_dataset)

  # Now we can actually export our serving estimator.
  estimator_exporter = tf.estimator.Estimator(
      model_fn=mock_t2r_model.model_fn,
      config=tf.estimator.RunConfig(model_dir=model_dir))
  exporter = default_export_generator.DefaultExportGenerator()
  exporter.set_specification_from_model(mock_t2r_model)
  serving_input_receiver_fn = (
      exporter.create_serving_input_receiver_tf_example_fn())
  exported_savedmodel_path = estimator_exporter.export_saved_model(
      export_dir_base=model_dir,
      serving_input_receiver_fn=serving_input_receiver_fn,
      checkpoint_path=tf.train.latest_checkpoint(model_dir))

  # Now we can load our exported estimator graph, there are no dependencies
  # on the model_fn or preprocessor anymore.
  feed_predictor_fn = contrib_predictor.from_saved_model(
      exported_savedmodel_path)
  mock_input_generator = mocks.MockInputGenerator(batch_size=BATCH_SIZE)
  features, labels = mock_input_generator.create_numpy_data()
  for pos, value in enumerate(prediction_ref):
    # We have to create our serialized tf.Example proto.
    example = tf.train.Example()
    example.features.feature['measured_position'].float_list.value.extend(
        features[pos])
    serialized_example = np.array(example.SerializeToString()).reshape(1,)
    if multi_dataset:
      feed_dict = {
          'input_example_dataset1': serialized_example,
          'input_example_dataset2': serialized_example
      }
    else:
      feed_dict = {
          'input_example_tensor': serialized_example
      }
    actual = feed_predictor_fn(feed_dict)['logit'].flatten()
    predicted = value['logit'].flatten()
    np.testing.assert_almost_equal(
        actual=actual, desired=predicted, decimal=4)
    if labels[pos] > 0:
      self.assertGreater(predicted[0], 0)
    else:
      self.assertLess(predicted[0], 0)