This article collects typical usage examples of the tensorflow.Optimizer class in Python. If you are wondering what tensorflow.Optimizer does or how to use it, the curated code examples below may help. You can also explore further usage examples in the tensorflow module, where the class is defined.
Three code examples of tensorflow.Optimizer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
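For context before the examples: tf.Optimizer (tf.train.Optimizer in the 1.x API) is the abstract base class for optimizers. Concrete subclasses expose compute_gradients() and apply_gradients(), which minimize() combines. Below is a minimal sketch of that two-step workflow; the toy loss, variable values, and learning rate are illustrative, not taken from the examples on this page.

import tensorflow as tf  # TensorFlow 1.x style API

# Toy problem: pull w toward a fixed target (values are illustrative).
w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w - tf.constant([0.5, -0.5])))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# The two-step form used throughout the examples below:
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(train_op)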
Example 1: _optimize_clone
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Optimizer [as alias]
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute losses and gradients for a single clone.

  Args:
    optimizer: A tf.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.
    **kwargs: Dict of kwargs to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone. Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
  """
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
  return sum_loss, clone_grad
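To see how Example 1 is typically called, the sketch below wires _optimize_clone into a toy graph. The Clone namedtuple and the simplified _gather_clone_loss are assumptions modeled on TF-Slim's model_deploy module, not the original helpers, and the layer, labels, and learning rate are illustrative.

import collections
import tensorflow as tf

# Assumption: mirrors the Clone namedtuple from TF-Slim's model_deploy.
Clone = collections.namedtuple('Clone', ['outputs', 'scope', 'device'])

def _gather_clone_loss(clone, num_clones, regularization_losses):
  # Simplified stand-in: sum the losses recorded in the clone's scope,
  # scale by 1/num_clones, and add regularization losses if any.
  all_losses = []
  clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
  if clone_losses:
    all_losses.append(tf.add_n(clone_losses) / num_clones)
  if regularization_losses:
    all_losses.append(tf.add_n(regularization_losses))
  return tf.add_n(all_losses) if all_losses else None

with tf.device('/cpu:0'):
  x = tf.constant([[1.0, 2.0]])
  logits = tf.layers.dense(x, 2)
  # Registers a loss in tf.GraphKeys.LOSSES for the gather step above.
  tf.losses.softmax_cross_entropy(tf.constant([[1.0, 0.0]]), logits)

clone = Clone(outputs=logits, scope='', device='/cpu:0')
optimizer = tf.train.GradientDescentOptimizer(0.01)
clone_loss, grads_and_vars = _optimize_clone(
    optimizer, clone, num_clones=1, regularization_losses=[])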
Example 2: _optimize_clone
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Optimizer [as alias]
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
  """Compute losses and gradients for a single clone.

  Args:
    optimizer: A tf.Optimizer object.
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.
    **kwargs: Dict of kwargs to pass to compute_gradients().

  Returns:
    A tuple (clone_loss, clone_grads_and_vars).
      - clone_loss: A tensor for the total loss for the clone. Can be None.
      - clone_grads_and_vars: List of (gradient, variable) for the clone.
        Can be empty.
  """
  sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
  clone_grad = None
  if sum_loss is not None:
    with tf.device(clone.device):
      clone_grad = optimizer.compute_gradients(sum_loss + clone.reg_loss,
                                               **kwargs)
  return sum_loss, clone_grad
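The only difference from Example 1 is that this variant also adds the clone's own clone.reg_loss to the total before calling compute_gradients(), so a per-clone regularization term is folded into the gradients while the returned clone_loss is left unchanged. A reg_loss field appears to be an addition this fork makes to the Clone namedtuple; it is not part of the stock TF-Slim definition.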
Example 3: compute_gradients
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Optimizer [as alias]
def compute_gradients(self, *args, **kwargs):
    """Compute gradients of all trainable variables.

    See Optimizer.compute_gradients() for more info.

    In DistributedOptimizer, compute_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = self._optimizer.compute_gradients(*args, **kwargs)
    if size() > 1:
        averaged_gradients = []
        with tf.name_scope(self._name + "_Allreduce"):
            for grad, var in gradients:
                if grad is not None:
                    avg_grad = allreduce(grad,
                                         device_dense=self._device_dense,
                                         device_sparse=self._device_sparse,
                                         compression=self._compression)
                    averaged_gradients.append((avg_grad, var))
                else:
                    averaged_gradients.append((None, var))
        return averaged_gradients
    else:
        return gradients
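This override matches Horovod's DistributedOptimizer wrapper (allreduce, size(), and the _device_dense/_device_sparse/_compression fields belong to Horovod's TensorFlow API). A minimal usage sketch, assuming Horovod is installed; the toy loss and learning rate are illustrative.

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()

# Toy loss (illustrative values).
w = tf.Variable(0.0)
loss = tf.square(w - 1.0)

# Wrapping a stock optimizer gives the allreduce-on-compute_gradients
# behavior shown above.
opt = hvd.DistributedOptimizer(tf.train.GradientDescentOptimizer(0.1))
train_op = opt.minimize(loss)

# Broadcast initial variable state from rank 0 so all workers start in sync.
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
    for _ in range(5):
        sess.run(train_op)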