This article collects typical usage examples of the Python method tensorflow.python.ops.clip_ops.clip_by_global_norm. If you are wondering what clip_ops.clip_by_global_norm does or how to use it, the curated code samples below should help; you can also explore the containing module, tensorflow.python.ops.clip_ops, for related usage.
The following shows 6 code examples of the clip_ops.clip_by_global_norm method, ordered by popularity by default.
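Before the examples, here is a minimal standalone sketch of what clip_by_global_norm computes, assuming TensorFlow 1.x graph mode (the tf.contrib-era API all of the examples below are written against): the global norm is sqrt(sum(||t||**2 for t in t_list)), and every tensor is scaled by clip_norm / max(global_norm, clip_norm).

import tensorflow as tf
from tensorflow.python.ops import clip_ops

# Two "gradient" tensors with L2 norms 5 and 12, so the global norm is
# sqrt(5**2 + 12**2) = 13.
grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]
clipped, global_norm = clip_ops.clip_by_global_norm(grads, clip_norm=6.5)

with tf.Session() as sess:
  print(sess.run(global_norm))  # 13.0
  print(sess.run(clipped))      # every tensor scaled by 6.5 / 13 = 0.5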
Example 1: get_train_step
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def get_train_step(self, loss):
  """Returns the ops to run to perform a training step on this estimator.

  Args:
    loss: The loss to use when calculating gradients.

  Returns:
    The ops to run to perform a training step.
  """
  my_vars = self._get_vars()
  if not (self._get_feature_columns() or my_vars):
    return []
  grads = gradients.gradients(loss, my_vars)
  if self._gradient_clip_norm:
    grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
  return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]
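Two details worth noting here: get_train_step returns an empty list when the estimator has neither feature columns nor trainable variables, giving callers a uniform "nothing to train" result, and clipping is applied only when self._gradient_clip_norm is set.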
Example 2: __init__
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def __init__(self,
             num_label_columns,
             optimizer,
             gradient_clip_norm,
             num_ps_replicas,
             scope):
  """Common initialization for all _ComposableModel objects.

  Args:
    num_label_columns: The number of label columns.
    optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the model. If `None`, will use an FTRL optimizer.
    gradient_clip_norm: A float > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      tf.clip_by_global_norm for more details.
    num_ps_replicas: The number of parameter server replicas.
    scope: Scope for variables created in this model.
  """
  self._num_label_columns = num_label_columns
  self._optimizer = optimizer
  self._gradient_clip_norm = gradient_clip_norm
  self._num_ps_replicas = num_ps_replicas
  self._scope = scope
  self._feature_columns = None
Example 3: _clip_gradients_by_norm
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
  """Clips gradients by global norm."""
  gradients, variables = zip(*grads_and_vars)
  clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
  return list(zip(clipped_gradients, variables))
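A hypothetical usage sketch of _clip_gradients_by_norm with a plain TF 1.x optimizer; the names optimizer and loss and the 5.0 threshold below are assumptions for illustration, not part of the original snippet:

import tensorflow as tf

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)  # `loss` assumed to exist
train_op = optimizer.apply_gradients(
    _clip_gradients_by_norm(grads_and_vars, clip_gradients=5.0))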
Example 4: __init__
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def __init__(self,
             num_label_columns,
             optimizer,
             gradient_clip_norm,
             num_ps_replicas,
             scope,
             trainable=True):
  """Common initialization for all _ComposableModel objects.

  Args:
    num_label_columns: The number of label columns.
    optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the model. If `None`, will use an FTRL optimizer.
    gradient_clip_norm: A float > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      tf.clip_by_global_norm for more details.
    num_ps_replicas: The number of parameter server replicas.
    scope: Scope for variables created in this model.
    trainable: True if this model contains variables that can be trained.
      False otherwise (in cases where the variables are used strictly for
      transforming input labels for training).
  """
  self._num_label_columns = num_label_columns
  self._optimizer = optimizer
  self._gradient_clip_norm = gradient_clip_norm
  self._num_ps_replicas = num_ps_replicas
  self._scope = scope
  self._trainable = trainable
  self._feature_columns = None
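This constructor is identical to the one in Example 2 except for the extra trainable flag, which records whether the model's variables should actually be optimized or exist only to transform input labels, as the docstring explains.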
Example 5: _process_gradients
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def _process_gradients(self, gradients_vars):
  """Process gradients (e.g. clipping) before applying them to weights."""
  with ops.name_scope('process_gradients'):
    gradients, variables = zip(*gradients_vars)
    if self._gradient_clipping_norm is not None:
      gradients, _ = clip_ops.clip_by_global_norm(
          gradients, self._gradient_clipping_norm)
    return zip(gradients, variables)
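Two design choices stand out here: ops.name_scope('process_gradients') groups the clipping ops under a single named scope in the graph (convenient when inspecting it in TensorBoard), and when self._gradient_clipping_norm is None the method degrades to a pass-through that simply re-zips the incoming (gradient, variable) pairs.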
Example 6: __init__
# Required import: from tensorflow.python.ops import clip_ops [or an alias]
# Alternatively: from tensorflow.python.ops.clip_ops import clip_by_global_norm [or an alias]
def __init__(self,  # _joint_weights: pylint: disable=invalid-name
             feature_columns,
             head,
             model_dir=None,
             weight_column_name=None,
             optimizer=None,
             gradient_clip_norm=None,
             _joint_weights=False,
             config=None,
             feature_engineering_fn=None):
  """Construct a `LinearEstimator` object.

  Args:
    feature_columns: An iterable containing all the feature columns used by
      the model. All items in the set should be instances of classes derived
      from `FeatureColumn`.
    head: An instance of _Head class.
    model_dir: Directory to save model parameters, graph, etc. This can
      also be used to load checkpoints from the directory into an estimator
      to continue training a previously saved model.
    weight_column_name: A string defining the feature column name representing
      weights. It is used to down weight or boost examples during training.
      It will be multiplied by the loss of the example.
    optimizer: An instance of `tf.Optimizer` used to train the model. If
      `None`, will use an Ftrl optimizer.
    gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      `tf.clip_by_global_norm` for more details.
    _joint_weights: If True, use a single (possibly partitioned) variable to
      store the weights. It's faster, but requires all feature columns to be
      sparse and use the 'sum' combiner. Incompatible with SDCAOptimizer.
    config: `RunConfig` object to configure the runtime settings.
    feature_engineering_fn: Feature engineering function. Takes features and
      labels which are the output of `input_fn` and returns features and
      labels which will be fed into the model.

  Returns:
    A `LinearEstimator` estimator.

  Raises:
    ValueError: if optimizer is not supported, e.g., SDCAOptimizer.
  """
  assert feature_columns
  if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("LinearEstimator does not support SDCA optimizer.")
  params = {
      "head": head,
      "feature_columns": feature_columns,
      "optimizer": optimizer,
      "gradient_clip_norm": gradient_clip_norm,
      "joint_weights": _joint_weights,
  }
  super(LinearEstimator, self).__init__(
      model_fn=_linear_model_fn,
      model_dir=model_dir,
      config=config,
      params=params,
      feature_engineering_fn=feature_engineering_fn)
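To round out Example 6, a hypothetical construction sketch; the feature columns and head below are made up for illustration and use the tf.contrib.layers / tf.contrib.learn head APIs of the same era:

from tensorflow.contrib import layers
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib

# Made-up feature columns for illustration.
language = layers.sparse_column_with_hash_bucket("language", hash_bucket_size=20)
age = layers.real_valued_column("age")

estimator = LinearEstimator(
    feature_columns=[age, language],
    head=head_lib.regression_head(),
    gradient_clip_norm=5.0)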