This article collects typical usage examples of the Python method tensorflow.python.estimator.model_fn.EstimatorSpec. If you are unsure what model_fn.EstimatorSpec does or how to use it, the curated code examples below may help. You can also explore the other members of the module tensorflow.python.estimator.model_fn.
Below are 15 code examples of model_fn.EstimatorSpec, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python examples.
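Before the examples, here is a minimal, self-contained sketch of where EstimatorSpec fits: a model_fn builds the graph for the requested mode and returns an EstimatorSpec describing the predictions, loss, and train op. The toy linear model and the feature name 'x' below are illustrative assumptions, not taken from any of the quoted projects:

import tensorflow as tf
from tensorflow.python.estimator.model_fn import EstimatorSpec

def model_fn(features, labels, mode, params):
  # A toy linear model; 'x' is a hypothetical feature name.
  logits = tf.layers.dense(features['x'], units=1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    return EstimatorSpec(mode=mode, predictions={'logits': logits})
  loss = tf.losses.mean_squared_error(labels, logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.GradientDescentOptimizer(params.get('lr', 0.01))
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
  return EstimatorSpec(mode=mode, loss=loss)  # EVAL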
Example 1: _train_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _train_spec(tower_specs,
                train_op,
                aggregation_device,
                aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.TRAIN`."""
  # Spec of the last tower is used as the template for the final spec, because
  # some `EstimatorSpec.training_hooks` rely on calls made in model_fn. For
  # example, `SyncReplicasOptimizerHook` validates the
  # `SyncReplicasOptimizer.apply_gradients` call. `TowerEstimator` makes that
  # call only in the last tower.
  estimator_spec = _asdict(tower_specs[-1])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.TRAIN
  estimator_spec['train_op'] = train_op
  estimator_spec['loss'] = _compute_sum_on_device(
      [spec.loss for spec in tower_specs], aggregation_device,
      aggregated_loss_name)
  return model_fn_lib.EstimatorSpec(**estimator_spec)
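This snippet leans on two private helpers from the same module, _asdict and _compute_sum_on_device. A rough sketch of what they do, reconstructed from how they are called here (the real implementations in replicate_model_fn.py may differ in detail, e.g. in how IndexedSlices are handled; the module aliases six, ops_lib, math_ops follow the naming used above):

def _asdict(namedtuple):
  """Returns a namedtuple (such as an EstimatorSpec) as a mutable dict,
  so individual fields can be overridden before rebuilding the spec."""
  return dict(six.iteritems(namedtuple._asdict()))

def _compute_sum_on_device(values, device, name=None):
  """Sums `values` on a single device so the aggregated loss lives in
  one place (e.g. the parameter server or the first GPU)."""
  with ops_lib.device(device):
    return math_ops.add_n(values, name=name)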
Example 2: _eval_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _eval_spec(tower_specs, aggregation_device, aggregated_loss_name='loss'):
  """Populate replicated EstimatorSpec for `GraphKeys.EVAL`."""
  estimator_spec = _asdict(tower_specs[0])
  estimator_spec['mode'] = model_fn_lib.ModeKeys.EVAL
  estimator_spec['loss'] = _compute_sum_on_device(
      [spec.loss for spec in tower_specs], aggregation_device,
      aggregated_loss_name)

  update_ops = []
  for tower_spec in tower_specs:
    for name, (_, update_op) in six.iteritems(tower_spec.eval_metric_ops):
      update_ops.append(update_op)

  with ops_lib.control_dependencies(update_ops):
    reduced_update_op = _reduce_metric_variables(len(tower_specs))

  eval_metric_ops = {}
  for name, (metric_tensor, _) in six.iteritems(tower_specs[0].eval_metric_ops):
    eval_metric_ops[name] = (metric_tensor, reduced_update_op)
  estimator_spec['eval_metric_ops'] = eval_metric_ops
  return model_fn_lib.EstimatorSpec(**estimator_spec)
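The aggregation leans on _reduce_metric_variables, which folds every tower's local metric variables into tower 0's and zeroes the rest, so that tower 0's metric tensors report cross-tower results. A sketch of the idea, assuming metric variables are registered in GraphKeys.METRIC_VARIABLES in per-tower order (the actual helper in replicate_model_fn.py may differ, e.g. with extra validation):

def _reduce_metric_variables(number_of_towers):
  """Adds every tower's metric variables into tower 0's, then zeroes them."""
  if number_of_towers == 1:
    return control_flow_ops.no_op()
  metric_variables = ops_lib.get_collection(ops_lib.GraphKeys.METRIC_VARIABLES)
  variables_per_tower = len(metric_variables) // number_of_towers
  ops = []
  for i in range(variables_per_tower):
    # Variables i, i + variables_per_tower, ... are replicas of each other.
    replicas = [metric_variables[i + variables_per_tower * tower_id]
                for tower_id in range(1, number_of_towers)]
    reduce_op = state_ops.assign_add(metric_variables[i],
                                     math_ops.add_n(replicas))
    with ops_lib.control_dependencies([reduce_op]):
      for replica in replicas:
        ops.append(state_ops.assign(replica, array_ops.zeros_like(replica)))
  return control_flow_ops.group(*ops)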
Example 3: create_estimator_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def create_estimator_spec(
    self, features, mode, logits, labels=None, train_op_fn=None):
  """Returns `EstimatorSpec` that a model_fn can return.

  Please note that,
  + All args must be passed via name.

  Args:
    features: Input `dict` of `Tensor` objects.
    mode: Estimator's `ModeKeys`.
    logits: logits `Tensor` to be used by the head.
    labels: Labels `Tensor`, or `dict` of same.
    train_op_fn: Function that takes a scalar loss `Tensor` and returns an op
      to optimize the model with the loss. This is used in TRAIN mode and
      must not be None. None is allowed in other modes. If you want to
      optimize loss yourself you can pass `no_op_train_fn` and then use
      EstimatorSpec.loss to compute and apply gradients.

  Returns:
    `EstimatorSpec`.
  """
  raise NotImplementedError('Calling an abstract method.')
Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 24 | Source: head.py
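The method above is abstract; each concrete head implements it. As a hedged illustration (not one of the actual _Head subclasses in head.py, and using plain tf.* losses for brevity), a minimal regression-style implementation might look like this:

def create_estimator_spec(
    self, features, mode, logits, labels=None, train_op_fn=None):
  """Minimal sketch of a concrete head; real heads add metrics and exports."""
  if mode == model_fn_lib.ModeKeys.PREDICT:
    return model_fn_lib.EstimatorSpec(
        mode=mode, predictions={'predictions': logits})
  loss = tf.losses.mean_squared_error(labels, logits)
  if mode == model_fn_lib.ModeKeys.EVAL:
    return model_fn_lib.EstimatorSpec(mode=mode, loss=loss)
  if train_op_fn is None:
    raise ValueError('train_op_fn can not be None in TRAIN mode.')
  return model_fn_lib.EstimatorSpec(
      mode=mode, loss=loss, train_op=train_op_fn(loss))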
Example 4: _call_model_fn
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _call_model_fn(self, features, labels, mode):
  """Calls model function.

  Args:
    features: features dict.
    labels: labels dict.
    mode: ModeKeys

  Returns:
    An `EstimatorSpec` object.

  Raises:
    ValueError: if model_fn returns invalid objects.
  """
  model_fn_args = _model_fn_args(self._model_fn)
  kwargs = {}
  if 'mode' in model_fn_args:
    kwargs['mode'] = mode
  if 'params' in model_fn_args:
    kwargs['params'] = self.params
  if 'config' in model_fn_args:
    kwargs['config'] = self.config
  model_fn_results = self._model_fn(
      features=features, labels=labels, **kwargs)
  if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
    raise ValueError('model_fn should return an EstimatorSpec.')
  return model_fn_results
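The argument probing is done by _model_fn_args, a small reflection helper. A sketch of how such a helper can be written with the standard inspect module (the version shipped with Estimator uses TensorFlow's own tf_inspect wrappers, so this is an approximation):

import inspect

def _model_fn_args(fn):
  """Returns the argument names of `fn`, so that optional model_fn
  arguments ('mode', 'params', 'config') are passed only when declared."""
  # inspect.signature also resolves functools.partial objects and
  # strips `self` from bound methods.
  return tuple(inspect.signature(fn).parameters.keys())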
Example 5: get_estimator
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def get_estimator(self, tf):
  from tensorflow.python.estimator.estimator import Estimator
  from tensorflow.python.estimator.run_config import RunConfig
  from tensorflow.python.estimator.model_fn import EstimatorSpec

  def model_fn(features, labels, mode, params):
    with tf.gfile.GFile(self.graph_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
    input_names = ['input_ids', 'input_mask', 'input_type_ids']
    output = tf.import_graph_def(
        graph_def,
        input_map={k + ':0': features[k] for k in input_names},
        return_elements=['final_encodes:0'])
    return EstimatorSpec(mode=mode, predictions={
        'client_id': features['client_id'],
        'encodes': output[0]
    })

  config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
  config.gpu_options.allow_growth = True
  config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
  config.log_device_placement = False
  # session-wise XLA doesn't seem to work on tf 1.10
  # if args.xla:
  #   config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
  return Estimator(model_fn=model_fn, config=RunConfig(session_config=config))
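Since this model_fn only populates predictions, the returned Estimator is predict-only. A hedged usage sketch follows; request_generator, worker, and send_back are hypothetical names standing in for the surrounding serving loop, not part of the quoted project:

def input_fn():
  # Hypothetical generator yielding dicts of already-tokenized requests.
  return tf.data.Dataset.from_generator(
      request_generator,
      output_types={'client_id': tf.int32, 'input_ids': tf.int32,
                    'input_mask': tf.int32, 'input_type_ids': tf.int32})

estimator = worker.get_estimator(tf)
for result in estimator.predict(input_fn, yield_single_examples=False):
  # Each result carries the client_id through with its encodings.
  send_back(result['client_id'], result['encodes'])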
Example 6: as_estimator_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def as_estimator_spec(self):
  """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
  host_calls = {}
  if self.eval_metrics is not None:
    host_calls['eval_metrics'] = self.eval_metrics
  if self.host_call is not None:
    host_calls['host_call'] = self.host_call
  host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
  eval_metric_ops = None
  if self.eval_metrics is not None:
    eval_metric_ops = host_call_ret['eval_metrics']
  hooks = None
  if self.host_call is not None:
    hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
  if tensor_tracer.TensorTracer.is_enabled():
    tt = tensor_tracer.TensorTracer()
    tracing_calls = tt.trace_cpu(ops.get_default_graph())
    tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls)
    tracing_functions = tracing_call_ret.values()
    if tracing_functions:
      if hooks:
        hooks.extend([_OutfeedHostCallHook(tracing_functions)])
      else:
        hooks = [_OutfeedHostCallHook(tracing_functions)]
  hooks = tuple(hooks or [])
  scaffold = self.scaffold_fn() if self.scaffold_fn else None
  return model_fn_lib.EstimatorSpec(
      mode=self.mode,
      predictions=self.predictions,
      loss=self.loss,
      train_op=self.train_op,
      eval_metric_ops=eval_metric_ops,
      export_outputs=self.export_outputs,
      scaffold=scaffold,
      training_hooks=self.training_hooks + hooks,
      evaluation_hooks=self.evaluation_hooks + hooks,
      prediction_hooks=self.prediction_hooks + hooks)
Example 7: _verify_estimator_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _verify_estimator_spec(self, estimator_spec):
  """Validates the estimator_spec."""
  if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
    return estimator_spec

  err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
  if estimator_spec.training_chief_hooks:
    raise ValueError(
        err_msg.format('training_chief_hooks') + 'If you want' +
        ' to pass training hooks, please pass via training_hooks.')

  if estimator_spec.scaffold:
    logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                    'Please use TPUEstimatorSpec.')
  return estimator_spec
Example 8: _scale_tower_loss
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _scale_tower_loss(tower_spec, loss_reduction, number_of_towers):
  """Produce an EstimatorSpec with appropriately scaled loss."""
  if tower_spec.loss is None:
    return tower_spec

  estimator_spec = _asdict(tower_spec)
  estimator_spec['loss'] = _scale_loss(tower_spec.loss, loss_reduction,
                                       number_of_towers)
  return model_fn_lib.EstimatorSpec(**estimator_spec)
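The scaling itself lives in _scale_loss. A sketch of the likely behavior, inferred from the tf.losses reduction semantics (the real helper in replicate_model_fn.py may differ in detail): SUM-reduced losses grow with the number of towers by design and are left alone, while mean-style reductions are divided by the tower count so the replicated loss matches the single-tower value.

def _scale_loss(loss, loss_reduction, number_of_towers, name='averaged_loss'):
  """Averages mean-style losses over towers; leaves SUM reductions as-is."""
  if loss_reduction == losses.Reduction.SUM:
    return loss
  return math_ops.div(loss, 1.0 * number_of_towers, name=name)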
Example 9: get_estimator
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def get_estimator(self):
  from tensorflow.python.estimator.estimator import Estimator
  from tensorflow.python.estimator.run_config import RunConfig
  from tensorflow.python.estimator.model_fn import EstimatorSpec

  def model_fn(features, labels, mode, params):
    with tf.gfile.GFile(self.graph_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
    input_names = ['input_ids', 'input_mask', 'input_type_ids']
    output = tf.import_graph_def(
        graph_def,
        input_map={k + ':0': features[k] for k in input_names},
        return_elements=['final_encodes:0'])
    return EstimatorSpec(mode=mode, predictions={
        'encodes': output[0]
    })

  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
  config.log_device_placement = False
  config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
  return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                   params={'batch_size': self.batch_size}, model_dir='../tmp')
Example 10: as_estimator_spec
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def as_estimator_spec(self):
  """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
  host_calls = {}
  if self.eval_metrics is not None:
    host_calls['eval_metrics'] = self.eval_metrics
  if self.host_call is not None:
    host_calls['host_call'] = self.host_call
  host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
  eval_metric_ops = None
  if self.eval_metrics is not None:
    eval_metric_ops = host_call_ret['eval_metrics']
  hooks = None
  if self.host_call is not None:
    hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
  hooks = list(hooks or [])
  scaffold = self.scaffold_fn() if self.scaffold_fn else None
  return model_fn_lib.EstimatorSpec(
      mode=self.mode,
      predictions=self.predictions,
      loss=self.loss,
      train_op=self.train_op,
      eval_metric_ops=eval_metric_ops,
      export_outputs=self.export_outputs,
      scaffold=scaffold,
      training_hooks=self.training_hooks + hooks,
      evaluation_hooks=self.evaluation_hooks + hooks,
      prediction_hooks=self.prediction_hooks + hooks)
Example 11: get_estimator
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def get_estimator(self):
  from tensorflow.python.estimator.estimator import Estimator
  from tensorflow.python.estimator.run_config import RunConfig
  from tensorflow.python.estimator.model_fn import EstimatorSpec

  def model_fn(features, labels, mode, params):
    with tf.gfile.GFile(self.graph_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
    input_names = ['input_ids', 'input_mask', 'input_type_ids']
    output = tf.import_graph_def(
        graph_def,
        input_map={k + ':0': features[k] for k in input_names},
        return_elements=['final_encodes:0'])
    return EstimatorSpec(mode=mode, predictions={
        'encodes': output[0]
    })

  config = tf.ConfigProto()
  config.gpu_options.allow_growth = True
  config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
  config.log_device_placement = False
  config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
  return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
                   params={'batch_size': self.batch_size})
Example 12: _call_model_fn
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _call_model_fn(self, features, labels, mode, config):
  """Calls model function.

  Args:
    features: features dict.
    labels: labels dict.
    mode: ModeKeys
    config: RunConfig

  Returns:
    An `EstimatorSpec` object.

  Raises:
    ValueError: if model_fn returns invalid objects.
  """
  model_fn_args = util.fn_args(self._model_fn)
  kwargs = {}
  if 'labels' in model_fn_args:
    kwargs['labels'] = labels
  else:
    if labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in model_fn_args:
    kwargs['mode'] = mode
  if 'params' in model_fn_args:
    kwargs['params'] = self.params
  if 'config' in model_fn_args:
    kwargs['config'] = config
  model_fn_results = self._model_fn(features=features, **kwargs)
  if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
    raise ValueError('model_fn should return an EstimatorSpec.')
  return model_fn_results
Example 13: _call_model_fn
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _call_model_fn(self, features, labels, mode, config):
  """Calls model function.

  Args:
    features: features dict.
    labels: labels dict.
    mode: ModeKeys
    config: RunConfig

  Returns:
    An `EstimatorSpec` object.

  Raises:
    ValueError: if model_fn returns invalid objects.
  """
  model_fn_args = util.fn_args(self._model_fn)
  kwargs = {}
  if 'labels' in model_fn_args:
    kwargs['labels'] = labels
  else:
    if labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in model_fn_args:
    kwargs['mode'] = mode
  if 'params' in model_fn_args:
    kwargs['params'] = self.params
  if 'config' in model_fn_args:
    kwargs['config'] = config
  model_fn_results = self._model_fn(features=features, **kwargs)
  if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
    raise ValueError('model_fn should return an EstimatorSpec.')
  return model_fn_results
Developer: PacktPublishing | Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | Lines: 37 | Source: estimator.py
Example 14: __new__
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def __new__(cls, model, step=None, train_op=None, **kwargs):
  if "mode" not in kwargs:
    raise ValueError("Must provide a mode (TRAIN/EVAL/PREDICT) when "
                     "creating an EstimatorSpec")
  if train_op is None:
    raise ValueError(
        "Must provide train_op for creating a PruningEstimatorSpec")

  def _get_step_increment_ops(model, step=None):
    """Returns ops to increment the pruning_step in the prunable layers."""
    increment_ops = []
    for layer in model.layers:
      if isinstance(layer, PruneLowMagnitude):
        if step is None:
          # Add ops to increment the pruning_step by 1
          increment_ops.append(state_ops.assign_add(layer.pruning_step, 1))
        else:
          increment_ops.append(
              state_ops.assign(layer.pruning_step,
                               math_ops.cast(step, dtypes.int32)))
    return control_flow_ops.group(increment_ops)

  pruning_ops = []
  # Grab the ops to update pruning step in every prunable layer
  step_increment_ops = _get_step_increment_ops(model, step)
  pruning_ops.append(step_increment_ops)
  # Grab the model updates.
  pruning_ops.append(model.updates)
  kwargs["train_op"] = control_flow_ops.group(pruning_ops, train_op)

  def init_fn(scaffold, session):  # pylint: disable=unused-argument
    return session.run(step_increment_ops)

  def get_new_scaffold(old_scaffold):
    if old_scaffold.init_fn is None:
      return monitored_session.Scaffold(
          init_fn=init_fn, copy_from_scaffold=old_scaffold)
    # TODO(suyoggupta): Figure out a way to merge the init_fn of the
    # original scaffold with the one defined above.
    raise ValueError("Scaffold provided to PruningEstimatorSpec must not "
                     "set an init_fn.")

  scaffold = monitored_session.Scaffold(init_fn=init_fn)
  if "scaffold" in kwargs:
    scaffold = get_new_scaffold(kwargs["scaffold"])
  kwargs["scaffold"] = scaffold
  return super(PruningEstimatorSpec, cls).__new__(cls, **kwargs)
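A hedged sketch of how such a spec might be returned from a model_fn wrapping a pruned Keras model; build_pruned_keras_model and the feature name 'x' are hypothetical stand-ins, and the exact public API of tensorflow_model_optimization differs:

def model_fn(features, labels, mode, params):
  # Hypothetical: a Keras model whose layers are wrapped in PruneLowMagnitude.
  model = build_pruned_keras_model()
  logits = model(features['x'])
  loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
  train_op = tf.train.AdamOptimizer().minimize(
      loss, global_step=tf.train.get_global_step())
  # PruningEstimatorSpec groups the pruning-step updates into train_op
  # and installs an init_fn that primes the pruning counters.
  return PruningEstimatorSpec(
      model=model, train_op=train_op, mode=mode, loss=loss)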
Example 15: _call_model_fn
# Required import: from tensorflow.python.estimator import model_fn [as alias]
# Or: from tensorflow.python.estimator.model_fn import EstimatorSpec [as alias]
def _call_model_fn(self, features, labels, cache=None, is_export_mode=False):
  """Calls the model_fn with required parameters."""
  self._validate_model_features_and_labels(features, labels, is_export_mode)
  model_fn_args = function_utils.fn_args(self._model_fn)
  kwargs = {}

  # Makes deep copy with `config` and `params` in case user mutates them.
  config = copy.deepcopy(self._config)
  params = copy.deepcopy(self._params)

  if 'labels' in model_fn_args:
    kwargs['labels'] = labels
  elif labels is not None:
    raise ValueError(
        'model_fn does not take labels, but input_fn returns labels.')
  if 'mode' in model_fn_args:
    kwargs['mode'] = self._ctx.mode
  if 'config' in model_fn_args:
    kwargs['config'] = config
  if 'params' in model_fn_args:
    kwargs['params'] = params

  if cache is not None:
    params['cache'] = cache

  if 'params' not in model_fn_args:
    raise ValueError('model_fn ({}) does not include params argument, '
                     'required by TPUEstimator to pass batch size as '
                     'params[\'batch_size\']'.format(self._model_fn))

  if is_export_mode:
    batch_size_for_model_fn = None
  else:
    batch_size_for_model_fn = self._ctx.batch_size_for_model_fn

  if batch_size_for_model_fn is not None:
    _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)

  running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
  _add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)

  if not running_on_cpu:
    user_context = tpu_context.TPUContext(
        internal_ctx=self._ctx, call_from_input_fn=False)
    _add_item_to_params(params, _CTX_KEY, user_context)

  estimator_spec = self._model_fn(features=features, **kwargs)
  if (running_on_cpu and
      isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)):  # pylint: disable=protected-access
    # The estimator_spec will be passed to `Estimator` directly, which expects
    # type `EstimatorSpec`.
    return estimator_spec.as_estimator_spec()
  else:
    return estimator_spec
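The _add_item_to_params helper exists because `params` may be either a plain dict or an HParams-style object, which needs a dedicated API for adding keys. A sketch of the idea; the real helper in tpu_estimator.py dispatches on the HParams type directly rather than duck-typing as done here:

def _add_item_to_params(params, key, value):
  """Adds `key: value` to `params`, which may be a dict or HParams-like."""
  if hasattr(params, 'add_hparam'):
    # HParams-style object: update in place if the key already exists,
    # otherwise register it as a new hyperparameter.
    if hasattr(params, key):
      params.set_hparam(key, value)
    else:
      params.add_hparam(key, value)
  else:
    # Plain dict.
    params[key] = value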