This article collects typical usage examples of the Python method tensorflow.contrib.framework.get_global_step. If you have been wondering how framework.get_global_step works, how to call it, or what real-world uses look like, the curated examples here may help. You can also explore further usage examples for the containing module, tensorflow.contrib.framework.
Five code examples of framework.get_global_step are shown below, sorted by popularity by default.
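Before the examples, here is a minimal, self-contained sketch of what get_global_step does (the graph and session setup are illustrative, not taken from the examples below): it looks up the canonical global-step variable registered in the graph's collections, typically by tf.train.create_global_step.

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework

graph = tf.Graph()
with graph.as_default():
  # Register the canonical global step variable in this graph.
  created_step = tf.train.create_global_step()
  # get_global_step retrieves that same variable from the graph collections.
  global_step = contrib_framework.get_global_step(graph)
  assert global_step is created_step

  increment = tf.assign_add(global_step, 1)
  with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # -> 1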
Example 1: before_run
# Required import: from tensorflow.contrib import framework [as alias]
# Or: from tensorflow.contrib.framework import get_global_step [as alias]
def before_run(self, run_context):
  return session_run_hook.SessionRunArgs(
      {'global_step': contrib_framework.get_global_step(),
       'current_loss': run_context.session.graph.get_operation_by_name(
           LOSS_NAME).outputs[0]})
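In a hook like this, before_run only declares extra fetches; the fetched values come back in after_run. A hedged sketch of the counterpart side (the hook class name and print format are assumptions, not part of the example above):

import tensorflow as tf
from tensorflow.python.training import session_run_hook

class GlobalStepLoggingHook(session_run_hook.SessionRunHook):
  """Illustrative hook: fetches the global step and prints it each run."""

  def before_run(self, run_context):
    # Piggy-back the global step onto whatever session.run() is executing.
    return session_run_hook.SessionRunArgs(
        {'global_step': tf.train.get_global_step()})

  def after_run(self, run_context, run_values):
    # run_values.results holds the fetches requested in before_run.
    print('global_step =', run_values.results['global_step'])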
Example 2: _loss_to_train_op
# Required import: from tensorflow.contrib import framework [as alias]
# Or: from tensorflow.contrib.framework import get_global_step [as alias]
def _loss_to_train_op(self, loss):
  """Map `loss` to a training op."""
  with ops.name_scope('loss_to_train_op'):
    trainable_variables = ops.get_default_graph().get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
    global_step = contrib_framework.get_global_step()
    gradients = self._optimizer.compute_gradients(
        loss=loss, var_list=trainable_variables)
    processed_gradients = self._process_gradients(gradients)
    return self._optimizer.apply_gradients(
        processed_gradients, global_step=global_step)
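Passing global_step to apply_gradients is what makes the optimizer increment the step once per execution of the training op. When no gradient post-processing is needed, minimize collapses the compute/apply pair; a minimal sketch (the toy variable and learning rate are illustrative):

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework

x = tf.Variable(5.0)
loss = tf.square(x)
tf.train.create_global_step()

# minimize() == compute_gradients() + apply_gradients(); passing the
# global step makes the optimizer bump it as a side effect of training.
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
    loss, global_step=contrib_framework.get_global_step())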
Example 3: _get_train_ops
# Required import: from tensorflow.contrib import framework [as alias]
# Or: from tensorflow.contrib.framework import get_global_step [as alias]
def _get_train_ops(self, features, targets):
  """Method that builds model graph and returns trainer ops.

  Args:
    features: `Tensor` or `dict` of `Tensor` objects.
    targets: `Tensor` or `dict` of `Tensor` objects.

  Returns:
    Tuple of train `Operation` and loss `Tensor`.
  """
  features, spec = data_ops.ParseDataTensorOrDict(features)
  labels = data_ops.ParseLabelTensorOrDict(targets)

  graph_builder = self.graph_builder_class(
      self.params, device_assigner=self.device_assigner,
      **self.construction_args)

  epoch = None
  if self.data_feeder:
    epoch = self.data_feeder.make_epoch_variable()

  train = control_flow_ops.group(
      graph_builder.training_graph(
          features, labels, data_spec=spec, epoch=epoch,
          **self.training_args),
      state_ops.assign_add(contrib_framework.get_global_step(), 1))

  self.training_loss = graph_builder.training_loss(features, targets)

  return train, self.training_loss
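Note the pattern inside control_flow_ops.group: tensor forest training does not go through an optimizer, so nothing increments the global step automatically; grouping the training graph with an explicit assign_add keeps the step counter in sync. The same idiom in isolation (the update op below is a stand-in, not the forest training graph):

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework

tf.train.create_global_step()

# Stand-in for a training op that bypasses optimizers entirely.
counter = tf.Variable(0.0)
update_op = tf.assign_add(counter, 1.0)

# Run the update and the step increment as one grouped training op.
train_op = tf.group(
    update_op,
    tf.assign_add(contrib_framework.get_global_step(), 1))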
Example 4: get_model_fn
# Required import: from tensorflow.contrib import framework [as alias]
# Or: from tensorflow.contrib.framework import get_global_step [as alias]
def get_model_fn(params, graph_builder_class, device_assigner,
                 weights_name=None, keys_name=None, num_trainers=1,
                 trainer_id=0):
  """Return a model function given a way to construct a graph builder."""
  def _model_fn(features, labels):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)
    graph_builder = graph_builder_class(params,
                                        device_assigner=device_assigner)
    inference = {
        eval_metrics.INFERENCE_PROB_NAME:
            graph_builder.inference_graph(features)
    }
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    if keys:
      inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if labels is not None:
      training_loss = graph_builder.training_loss(
          features, labels, name=LOSS_NAME)
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
  return _model_fn
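The tuple returned by _model_fn, (predictions, loss, train_op), matches the legacy tf.contrib.learn model-function contract. A hedged sketch of how such a factory might be wired into an estimator (the hyperparameter values and model_dir are made up for illustration, and device_assigner=None lets the graph builder pick a default):

from tensorflow.contrib import learn
from tensorflow.contrib.tensor_forest.python import tensor_forest

# Illustrative wiring only; parameter values are arbitrary.
params = tensor_forest.ForestHParams(
    num_classes=2, num_features=4, num_trees=10, max_nodes=100).fill()
model_fn = get_model_fn(params,
                        graph_builder_class=tensor_forest.RandomForestGraphs,
                        device_assigner=None)
estimator = learn.Estimator(model_fn=model_fn, model_dir='/tmp/forest_model')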
Example 5: get_model_fn
# Required import: from tensorflow.contrib import framework [as alias]
# Or: from tensorflow.contrib.framework import get_global_step [as alias]
def get_model_fn(params, graph_builder_class, device_assigner,
                 weights_name=None, keys_name=None):
  """Return a model function given a way to construct a graph builder."""
  def _model_fn(features, labels):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)
    processed_features, spec = data_ops.ParseDataTensorOrDict(features)
    _assert_float32(processed_features)
    if labels is not None:
      labels = data_ops.ParseLabelTensorOrDict(labels)
      _assert_float32(labels)

    graph_builder = graph_builder_class(params,
                                        device_assigner=device_assigner)
    inference = {eval_metrics.INFERENCE_PROB_NAME:
                 graph_builder.inference_graph(processed_features,
                                               data_spec=spec)}
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    if keys:
      inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if labels is not None:
      training_loss = graph_builder.training_loss(processed_features, labels,
                                                  data_spec=spec,
                                                  name=LOSS_NAME)
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              processed_features, labels, data_spec=spec,
              input_weights=weights),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
  return _model_fn
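Compared with Example 4, this variant parses raw features and labels with data_ops and checks dtypes up front. A minimal sketch of what a helper like _assert_float32 might look like (an assumed reimplementation for illustration, not the library's actual code):

import tensorflow as tf

def _assert_float32(tensors):
  """Illustrative dtype guard for a Tensor or a dict of Tensors."""
  if not isinstance(tensors, dict):
    tensors = {'input': tensors}
  for name, tensor in tensors.items():
    if tensor.dtype.base_dtype != tf.float32:
      raise ValueError(
          'Expected float32 for %s, got %s.' % (name, tensor.dtype))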