This article collects typical usage examples of the Python attribute tensorflow.python.saved_model.tag_constants.TPU. If you are wondering what tag_constants.TPU does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the module it belongs to, tensorflow.python.saved_model.tag_constants.
The following shows 14 code examples of the tag_constants.TPU attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
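For context, tag_constants.TPU is simply the string tag 'tpu' used to mark a MetaGraph inside a SavedModel as TPU-targeted. Below is a minimal, hedged sketch of tagging and reloading a SavedModel with it; the export path and the dummy variable are assumptions for illustration only.

import tensorflow.compat.v1 as tf
from tensorflow.python.saved_model import tag_constants

tf.disable_v2_behavior()
export_dir = '/tmp/tpu_tagged_model'  # hypothetical export path

with tf.Session(graph=tf.Graph()) as sess:
  v = tf.get_variable('v', shape=[], initializer=tf.ones_initializer())
  sess.run(tf.global_variables_initializer())
  builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
  # Tag the MetaGraph so TPU-aware tooling can select it by tag.
  builder.add_meta_graph_and_variables(
      sess, tags=[tag_constants.SERVING, tag_constants.TPU])
  builder.save()

with tf.Session(graph=tf.Graph()) as sess:
  tf.saved_model.loader.load(
      sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)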
Example 1: _sync_variables_ops

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _sync_variables_ops(ctx):
  """Create variables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.

  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """
  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()]
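The sync op above is just a check_numerics wrapped around each trainable variable's read_value(); running it forces the current values back to the host. A small CPU-only sketch of the same pattern (the variable here is an assumption):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

w = tf.get_variable('w', shape=[2], initializer=tf.zeros_initializer())
# Reading the variable and checking it for NaN/Inf mirrors the sync op above.
sync_op = tf.debugging.check_numerics(
    w.read_value(), 'Gradient for %s is NaN' % w.name).op

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(sync_op)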
Example 2: _increase_eval_step_op

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU system
      before returning to CPU host for each `Session.run`.

  Returns:
    An operation.
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator.evaluate increments the eval step by 1 per run by default,
  # so we add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True)
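Because Estimator.evaluate itself adds 1 to the eval step per Session.run, the op only adds iterations_per_loop - 1. A standalone sketch of that arithmetic (the variable and constant are assumptions):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

eval_step = tf.get_variable('eval_step', shape=[], dtype=tf.int64,
                            initializer=tf.zeros_initializer(), trainable=False)
iterations_per_loop = tf.constant(100)
increase_op = tf.assign_add(
    eval_step,
    tf.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
    use_locking=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(increase_op))  # 99; the Estimator adds the remaining 1 itself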
Example 3: __init__

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def __init__(self, input_fn, batch_axis, ctx):
  """Constructor.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_InternalTPUContext` instance with mode.

  Raises:
    ValueError: If both `sharded_features` and `num_cores` are `None`.
  """
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
      ctx.input_partition_dims)

  self._sharded_per_core = ctx.is_input_sharded_per_core()
  self._input_fn = input_fn
  self._infeed_queue = None
  self._ctx = ctx
  self._batch_axis = batch_axis
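batch_axis describes, per input tensor, the dimension along which a global batch is cut into per-shard pieces. A CPU-only analogue of splitting along axis 0 for two hypothetical shards (all values are assumptions):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# A global batch of 8 examples; batch_axis=(0,) would mean: split along dim 0.
features = tf.reshape(tf.range(8, dtype=tf.float32), [8, 1])
per_shard = tf.split(features, num_or_size_splits=2, axis=0)  # 2 assumed shards

with tf.Session() as sess:
  print([t.shape for t in sess.run(per_shard)])  # [(4, 1), (4, 1)]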
Example 4: generate_infeed_enqueue_ops_and_dequeue_fn

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Generates infeed enqueue ops and dequeue_fn."""
  # When tf.while_loop is called, the body function, which invokes
  # `enqueue_fn` passed in, is called to construct the graph. So, input_fn
  # structure is recorded.
  enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
      self._invoke_input_fn_and_record_structure())

  self._validate_input_pipeline()

  def dequeue_fn():
    """dequeue_fn is used by TPU to retrieve the tensors."""
    # In the model-parallel case, both the host-side and device-side
    # computations must agree on the core on which infeed takes place. We
    # choose to perform infeed on logical core 0 of each replica.
    values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        values)

  return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
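The recorder keeps the nested (features, labels) structure so the flat tensors coming off the infeed queue can be packed back together. A rough CPU analogue using tf.nest (the structure shown is an assumption):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

features = {'x': tf.constant([[1.0, 2.0]]), 'mask': tf.constant([[1.0, 0.0]])}
labels = tf.constant([1])
structure = (features, labels)

flat = tf.nest.flatten(structure)                    # what travels through infeed
rebuilt = tf.nest.pack_sequence_as(structure, flat)  # what dequeue_fn hands back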
Example 5: create_cpu_hostcall

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def create_cpu_hostcall(host_calls):
  """Runs the host_call on CPU instead of TPU when use_tpu=False."""
  _OutfeedHostCall.validate(host_calls)
  ret = {}
  for name, host_call in host_calls.items():
    host_fn, tensors = host_call
    if isinstance(tensors, (tuple, list)):
      ret[name] = host_fn(*tensors)
    else:
      # Must be dict.
      try:
        ret[name] = host_fn(**tensors)
      except TypeError as e:
        logging.warning(
            'Exception while calling %s: %s. It is likely the tensors '
            '(%s[1]) do not match the '
            'function\'s arguments', name, e, name)
        raise e
  return ret
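A host_call is just a (fn, tensors) pair keyed by name; with use_tpu=False the function is applied directly to the CPU tensors. A minimal sketch of that structure (the names and values are assumptions, and the internal create_cpu_hostcall call is only mimicked in the last line):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

step = tf.constant(10, dtype=tf.int64)
loss = tf.constant(0.5)

def host_fn(step, loss):
  # On CPU the function simply receives the tensors themselves.
  return {'step': step, 'loss': loss}

host_calls = {'logging': (host_fn, [step, loss])}
# create_cpu_hostcall(host_calls) would effectively do this per entry:
ret = {name: fn(*tensors) for name, (fn, tensors) in host_calls.items()}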
Example 6: _convert_train_steps_to_hooks

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _convert_train_steps_to_hooks(self, steps, max_steps):
  with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
    if ctx.is_running_on_cpu():
      return super(TPUEstimator, self)._convert_train_steps_to_hooks(
          steps, max_steps)

  # On TPU.
  if steps is None and max_steps is None:
    raise ValueError(
        'For TPU training, one of `steps` or `max_steps` must be set. '
        'Cannot be both `None`.')

  # Estimator.train has explicit positiveness check.
  if steps is not None:
    util_lib.check_positive_integer(steps, 'Train steps')
  if max_steps is not None:
    util_lib.check_positive_integer(max_steps, 'Train max_steps')

  return [
      _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
  ]
Example 7: _increase_eval_step_op

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
      system before returning to CPU host for each `Session.run`.

  Returns:
    An operation.
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator.evaluate increments the eval step by 1 per run by default,
  # so we add the remaining difference here.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True)
Example 8: begin

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def begin(self):
  logging.info('TPU job name %s', self._master_job)
  self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
  self._init_ops = []
  if self._should_initialize_tpu:
    self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
  else:
    self._finalize_ops = []

  summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
  self._init_ops.extend(summary_writer_init_ops)
  # Get all the writer resources from the initializer, so we know what to flush.
  for op in summary_writer_init_ops:
    self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
Example 9: after_create_session

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def after_create_session(self, session, coord):
  if self._should_initialize_tpu:
    logging.info('Init TPU system')
    start = time.time()
    with ops.Graph().as_default():
      with tf_session.Session(
          self._master, config=self._session_config) as sess:
        sess.run(tpu.initialize_system(job=self._master_job))
    logging.info('Initialized TPU in %d seconds', time.time() - start)

  session.run(self._init_ops,
              options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))

  if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
    logging.info('Compiling user program: this may take a while...')
    self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)

  self._infeed_controller = self._create_infeed_controller(
      name='InfeedController', target=self._run_infeed, args=(session,))

  self._outfeed_controller = _OpQueueContext(
      name='OutfeedController', target=self._run_outfeed, args=(session,))

  # Enable the worker watchdog to terminate workers on coordinator exit.
  watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
  if watchdog_timeout > 0:
    session_support.start_worker_watchdog(session,
                                          shutdown_timeout=watchdog_timeout)
Example 10: _verify_estimator_spec

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _verify_estimator_spec(self, estimator_spec):
  """Validates the estimator_spec."""
  if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
    return estimator_spec

  err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
  if estimator_spec.training_chief_hooks:
    raise ValueError(
        err_msg.format('training_chief_hooks') + ' If you want' +
        ' to pass training hooks, please pass via training_hooks.')

  if estimator_spec.scaffold:
    logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                    'Please use TPUEstimatorSpec.')
  return estimator_spec
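In practice this means hooks go through training_hooks on the spec, and a scaffold is supplied via TPUEstimatorSpec.scaffold_fn rather than EstimatorSpec.scaffold. A minimal, hedged sketch using the public tf.estimator.tpu.TPUEstimatorSpec (the loss, train_op and hook are placeholders):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

loss = tf.constant(1.0)
train_op = tf.no_op()
spec = tf.estimator.tpu.TPUEstimatorSpec(
    mode=tf.estimator.ModeKeys.TRAIN,
    loss=loss,
    train_op=train_op,
    # Per-worker hooks are accepted; training_chief_hooks on a plain
    # EstimatorSpec would be rejected by the validation above.
    training_hooks=[tf.estimator.StopAtStepHook(last_step=100)])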
Example 11: _convert_eval_steps_to_hooks

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _convert_eval_steps_to_hooks(self, steps):
  with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
    if ctx.is_running_on_cpu():
      return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

  if steps is None:
    raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')

  util_lib.check_positive_integer(steps, 'Eval steps')

  return [
      evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
          num_evals=steps),
      _SetEvalIterationsHook(steps)
  ]
Example 12: _eval_on_tpu_system

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_per_loop_var = _create_or_get_iterations_per_loop()

  (single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
  ) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)

  def multi_tpu_eval_steps_on_single_shard():
    loop_vars = [_ZERO_LOSS]
    if model_fn_wrapper._eval_cache_fn is not None:
      batch_size = ctx.global_batch_size
      num_shards = ctx._config._tpu_config.num_shards
      loop_vars += model_fn_wrapper._eval_cache_fn(batch_size // num_shards)

    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_eval_step,
        loop_vars)

  compile_op, ret = tpu.split_compile_and_shard(
      multi_tpu_eval_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  loss = ret[0]
  scaffold = _get_scaffold(captured_scaffold_fn)
  return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get()
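Conceptually, training_loop.repeat runs the single-shard step iterations_per_loop times, threading the loss through as a loop variable. A CPU-only analogue using tf.while_loop (the step body and all values are assumptions):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

iterations_per_loop = tf.constant(10)

def single_eval_step(i, loss):
  # Placeholder body; the real step dequeues infeed data and runs the model.
  return i + 1, loss + 0.1

_, final_loss = tf.while_loop(
    lambda i, loss: i < iterations_per_loop,
    single_eval_step,
    [tf.constant(0), tf.constant(0.0)])  # 0.0 plays the role of _ZERO_LOSS

with tf.Session() as sess:
  print(sess.run(final_loss))  # roughly 1.0 after 10 steps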
Example 13: _train_on_tpu_system

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
  """Executes `model_fn_wrapper` multiple times on all TPU shards."""
  iterations_per_loop_var = _create_or_get_iterations_per_loop()

  (single_tpu_train_step, host_call, captured_scaffold_fn,
   captured_training_hooks) = (
       model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

  def multi_tpu_train_steps_on_single_shard():
    loop_vars = [_INITIAL_LOSS]
    if model_fn_wrapper._train_cache_fn is not None:
      batch_size = ctx.global_batch_size
      num_shards = ctx._config._tpu_config.num_shards
      loop_vars += model_fn_wrapper._train_cache_fn(batch_size // num_shards)

    return training_loop.repeat(
        iterations_per_loop_var,
        single_tpu_train_step,
        loop_vars)

  compile_op, ret = tpu.split_compile_and_shard(
      multi_tpu_train_steps_on_single_shard,
      inputs=[],
      num_shards=ctx.num_replicas,
      outputs_from_all_shards=False,
      device_assignment=ctx.device_assignment)

  loss = ret[0]
  scaffold = _get_scaffold(captured_scaffold_fn)
  return compile_op, loss, host_call, scaffold, captured_training_hooks.get()
Example 14: AddOp

# Required import: from tensorflow.python.saved_model import tag_constants [as alias]
# Or: from tensorflow.python.saved_model.tag_constants import TPU [as alias]
def AddOp(self, op):  # pylint: disable=invalid-name
  for c in op.inputs:
    if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
      raise ValueError('{}: Op {} depends on TPU computation {}, '
                       'which is not allowed.'.format(self._message, op, c))
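The check relies on a NodeDef attribute (the internal tpu._TPU_REPLICATE_ATTR constant, assumed here to be the string '_tpu_replicate') that TPU rewriting stamps onto replicated ops. A small sketch of inspecting that attribute on an op's inputs; the ops below are plain CPU constants, so the attribute is simply absent:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(1.0)
y = x * 2.0
for inp in y.op.inputs:
  # For TPU-replicated producers this attribute would be present.
  print(inp.op.name, '_tpu_replicate' in inp.op.node_def.attr)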