This article collects typical usage examples of the Python method tensorflow.python.training.optimizer.minimize. If you are wondering what exactly optimizer.minimize does, how to call it, or what real-world usage looks like, the hand-picked code samples below may help. You can also explore further usage examples from the containing module, tensorflow.python.training.optimizer.
The following presents 13 code examples of optimizer.minimize, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
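Before the collected examples, here is a minimal, hedged sketch of a plain Optimizer.minimize call in TF 1.x graph mode; the placeholders, loss, and learning rate below are illustrative and not taken from any of the examples that follow.
```python
import tensorflow as tf  # assumes a TensorFlow 1.x (graph-mode) build

# Illustrative linear model; x, y, w and the learning rate are placeholders.
x = tf.placeholder(tf.float32, shape=[None, 3])
y = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.zeros([3, 1]), name="w")
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

global_step = tf.train.get_or_create_global_step()
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# minimize() combines compute_gradients() and apply_gradients() in one call
# and increments global_step after each update.
train_op = opt.minimize(loss, global_step=global_step)
```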
Example 1: minimize
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=optimizer.Optimizer.GATE_OP,
             aggregation_method=None, colocate_gradients_with_ops=False,
             name=None, grad_loss=None, decay_var_list=None):
  """Add operations to minimize `loss` by updating `var_list` with decay.

  This function is the same as Optimizer.minimize except that it allows to
  specify the variables that should be decayed using decay_var_list.
  If decay_var_list is None, all variables in var_list are decayed.
  For more information see the documentation of Optimizer.minimize.
  """
  self._decay_var_list = set(decay_var_list) if decay_var_list else False
  return super(DecoupledWeightDecayExtension, self).minimize(
      loss, global_step=global_step, var_list=var_list,
      gate_gradients=gate_gradients, aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
      grad_loss=grad_loss)
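To make decay_var_list concrete, the following hedged sketch assumes a TF 1.x build where tf.contrib.opt.AdamWOptimizer (an optimizer built from this extension) is available; the variables and loss are illustrative placeholders.
```python
import tensorflow as tf

# Illustrative parameters: decay the weight matrix but not the bias.
w1 = tf.Variable(tf.random_normal([10, 10]), name="w1")
b1 = tf.Variable(tf.zeros([10]), name="b1")
loss = tf.reduce_sum(tf.square(w1)) + tf.reduce_sum(tf.square(b1))

opt = tf.contrib.opt.AdamWOptimizer(weight_decay=1e-4, learning_rate=1e-3)
# Only variables listed in decay_var_list are decayed; b1 gets a plain Adam update.
train_op = opt.minimize(loss, decay_var_list=[w1])
```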
Example 2: minimize
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=optimizer.Optimizer.GATE_OP,
             aggregation_method=None, colocate_gradients_with_ops=False,
             name=None, grad_loss=None, decay_var_list=None):
  """Add operations to minimize `loss` by updating `var_list` with decay.

  This function is the same as Optimizer.minimize except that it allows to
  specify the variables that should be decayed using decay_var_list.
  If decay_var_list is None, all variables in var_list are decayed.
  For more information see the documentation of Optimizer.minimize.

  Args:
    loss: A `Tensor` containing the value to minimize.
    global_step: Optional `Variable` to increment by one after the
      variables have been updated.
    var_list: Optional list or tuple of `Variable` objects to update to
      minimize `loss`. Defaults to the list of variables collected in
      the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
    gate_gradients: How to gate the computation of gradients. Can be
      `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.
    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
    decay_var_list: Optional list of decay variables.

  Returns:
    An Operation that updates the variables in `var_list`. If `global_step`
    was not `None`, that operation also increments `global_step`.
  """
  self._decay_var_list = set(decay_var_list) if decay_var_list else False
  return super(DecoupledWeightDecayExtension, self).minimize(
      loss, global_step=global_step, var_list=var_list,
      gate_gradients=gate_gradients, aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
      grad_loss=grad_loss)
Example 3: __init__
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def __init__(self, weight_decay, **kwargs):
  """Construct the extension class that adds weight decay to an optimizer.

  Args:
    weight_decay: A `Tensor` or a floating point value, the factor by which
      a variable is decayed in the update step.
    **kwargs: Optional keyword arguments forwarded to the constructor of the
      base optimizer this extension is combined with.
  """
  self._decay_var_list = None  # is set in minimize or apply_gradients
  self._weight_decay = weight_decay
  # The tensors are initialized in call to _prepare
  self._weight_decay_tensor = None
  super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
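Since the constructor only stores the decay factor and forwards **kwargs up the MRO, the mixin pattern is easiest to see in a stripped-down sketch; TinyDecayExtension and TinySGDW below are hypothetical names for illustration and do not apply any actual decay.
```python
import tensorflow as tf


class TinyDecayExtension(object):
  """Illustrative stand-in for DecoupledWeightDecayExtension (no real decay)."""

  def __init__(self, weight_decay, **kwargs):
    self._weight_decay = weight_decay
    # Everything else (e.g. learning_rate) is forwarded to the base optimizer.
    super(TinyDecayExtension, self).__init__(**kwargs)


class TinySGDW(TinyDecayExtension, tf.train.GradientDescentOptimizer):
  """Gradient descent combined with the mixin via multiple inheritance."""


opt = TinySGDW(weight_decay=1e-4, learning_rate=0.1)
# MRO: TinySGDW -> TinyDecayExtension -> GradientDescentOptimizer -> Optimizer
print([cls.__name__ for cls in type(opt).__mro__[:4]])
```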
Example 4: minimize
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=optimizer.Optimizer.GATE_OP,
             aggregation_method=None, colocate_gradients_with_ops=False,
             name=None, grad_loss=None, decay_var_list=None):
  """Add operations to minimize `loss` by updating `var_list` with decay.

  This function is the same as Optimizer.minimize except that it allows to
  specify the variables that should be decayed using decay_var_list.
  If decay_var_list is None, all variables in var_list are decayed.
  For more information see the documentation of Optimizer.minimize.

  Args:
    loss: A `Tensor` containing the value to minimize.
    global_step: Optional `Variable` to increment by one after the
      variables have been updated.
    var_list: Optional list or tuple of `Variable` objects to update to
      minimize `loss`. Defaults to the list of variables collected in
      the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
    gate_gradients: How to gate the computation of gradients. Can be
      `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.
    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
    decay_var_list: Optional list of decay variables.

  Returns:
    An Operation that updates the variables in `var_list`. If `global_step`
    was not `None`, that operation also increments `global_step`.
  """
  self._decay_var_list = set(decay_var_list) if decay_var_list else False
  return super(DecoupledWeightDecayExtension, self).minimize(
      loss, global_step=global_step, var_list=var_list,
      gate_gradients=gate_gradients, aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
      grad_loss=grad_loss)
Example 5: __init__
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def __init__(self, weight_decay, **kwargs):
  """Construct the extension class that adds weight decay to an optimizer.

  Args:
    weight_decay: A `Tensor` or a floating point value, the factor by which
      a variable is decayed in the update step.
    **kwargs: Optional keyword arguments forwarded to the constructor of the
      base optimizer this extension is combined with.
  """
  self._decay_var_list = None  # is set in minimize or apply_gradients
  self._weight_decay = weight_decay
  # The tensors are initialized in call to _prepare
  self._weight_decay_tensor = None
  super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
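The comment above says the decay tensor is built later in _prepare; the sketch below (the class name _WeightDecaySketch is hypothetical) shows roughly how that conversion is typically done, supporting both a constant value and a zero-argument callable schedule.
```python
import tensorflow as tf


class _WeightDecaySketch(object):
  """Illustrative helper showing how _prepare can materialize the decay tensor."""

  def __init__(self, weight_decay):
    self._weight_decay = weight_decay
    self._weight_decay_tensor = None  # created lazily in _prepare

  def _prepare(self):
    weight_decay = self._weight_decay
    if callable(weight_decay):
      # A schedule may be supplied as a zero-argument callable.
      weight_decay = weight_decay()
    self._weight_decay_tensor = tf.convert_to_tensor(
        weight_decay, name="weight_decay")


sketch = _WeightDecaySketch(weight_decay=1e-4)
sketch._prepare()
print(sketch._weight_decay_tensor)
```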
Example 6: minimize
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=optimizer.Optimizer.GATE_OP,
             aggregation_method=None, colocate_gradients_with_ops=False,
             name=None, grad_loss=None, decay_var_list=None):
  """Add operations to minimize `loss` by updating `var_list` with decay.

  This function is the same as Optimizer.minimize except that it allows to
  specify the variables that should be decayed using decay_var_list.
  If decay_var_list is None, all variables in var_list are decayed.
  For more information see the documentation of Optimizer.minimize.

  Args:
    loss: A `Tensor` containing the value to minimize.
    global_step: Optional `Variable` to increment by one after the
      variables have been updated.
    var_list: Optional list or tuple of `Variable` objects to update to
      minimize `loss`. Defaults to the list of variables collected in
      the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
    gate_gradients: How to gate the computation of gradients. Can be
      `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.
    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
    decay_var_list: Optional list of decay variables.

  Returns:
    An Operation that updates the variables in `var_list`. If `global_step`
    was not `None`, that operation also increments `global_step`.
  """
  self._decay_var_list = set(decay_var_list) if decay_var_list else False
  return super(DecoupledWeightDecayExtension, self).minimize(
      loss, global_step=global_step, var_list=var_list,
      gate_gradients=gate_gradients, aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
      grad_loss=grad_loss)
Example 7: __init__
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def __init__(self, weight_decay, **kwargs):
  """Construct the extension class that adds weight decay to an optimizer.

  Args:
    weight_decay: A `Tensor` or a floating point value, the factor by which
      a variable is decayed in the update step.
    **kwargs: Optional keyword arguments forwarded to the constructor of the
      base optimizer this extension is combined with.
  """
  self._decay_var_list = None  # is set in minimize or apply_gradients
  self._weight_decay = weight_decay
  # The tensors are initialized in call to _prepare
  self._weight_decay_tensor = None
  super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
Example 8: __init__
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def __init__(self, weight_decay, **kwargs):
  """Construct the extension class that adds weight decay to an optimizer.

  Args:
    weight_decay: A `Tensor` or a floating point value, the factor by which
      a variable is decayed in the update step.
    **kwargs: Optional keyword arguments forwarded to the constructor of the
      base optimizer this extension is combined with.
  """
  self._decay_var_list = None  # is set in minimize or apply_gradients
  self._weight_decay = weight_decay
  # The tensors are initialized in call to _prepare
  self._weight_decay_tensor = None
  super(DecoupledWeightDecayExtension, self).__init__(**kwargs)
Example 9: minimize
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=optimizer.Optimizer.GATE_OP,
             aggregation_method=None, colocate_gradients_with_ops=False,
             name=None, grad_loss=None, decay_var_list=None):
  """Add operations to minimize `loss` by updating `var_list` with decay.

  This function is the same as Optimizer.minimize except that it allows to
  specify the variables that should be decayed using decay_var_list.
  If decay_var_list is None, all variables in var_list are decayed.
  For more information see the documentation of Optimizer.minimize.

  Args:
    loss: A `Tensor` containing the value to minimize.
    global_step: Optional `Variable` to increment by one after the
      variables have been updated.
    var_list: Optional list or tuple of `Variable` objects to update to
      minimize `loss`. Defaults to the list of variables collected in
      the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
    gate_gradients: How to gate the computation of gradients. Can be
      `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
    aggregation_method: Specifies the method used to combine gradient terms.
      Valid values are defined in the class `AggregationMethod`.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    name: Optional name for the returned operation.
    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
    decay_var_list: Optional list of decay variables.

  Returns:
    An Operation that updates the variables in `var_list`. If `global_step`
    was not `None`, that operation also increments `global_step`.
  """
  self._decay_var_list = set(decay_var_list) if decay_var_list else False
  return super(DecoupledWeightDecayExtension, self).minimize(
      loss, global_step=global_step, var_list=var_list,
      gate_gradients=gate_gradients, aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops, name=name,
      grad_loss=grad_loss)
Example 10: extend_with_decoupled_weight_decay
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and additionally
    decays the variable with the weight decay being decoupled from the
    optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay
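As a complement to the Adam usage in the docstring, the hedged sketch below builds a momentum variant from the same factory (via tf.contrib.opt in TF 1.x builds) and shows the compute_gradients / apply_gradients path with decay_var_list; the variables and loss are illustrative placeholders.
```python
import tensorflow as tf

# The factory is also exported as tf.contrib.opt.extend_with_decoupled_weight_decay.
MomentumW = tf.contrib.opt.extend_with_decoupled_weight_decay(
    tf.train.MomentumOptimizer)

w = tf.Variable(tf.random_normal([5, 5]), name="w")
b = tf.Variable(tf.zeros([5]), name="b")
loss = tf.reduce_sum(tf.square(w)) + tf.reduce_sum(tf.square(b))

opt = MomentumW(weight_decay=1e-4, learning_rate=0.01, momentum=0.9)
grads_and_vars = opt.compute_gradients(loss, var_list=[w, b])
# Decay only `w`; `b` receives a plain momentum update.
train_op = opt.apply_gradients(grads_and_vars, decay_var_list=[w])
```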
Example 11: extend_with_decoupled_weight_decay
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and additionally
    decays the variable with the weight decay being decoupled from the
    optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay
# @tf_export("contrib.opt.MomentumWOptimizer")
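The commented-out @tf_export line hints at the concrete MomentumWOptimizer class that follows the factory in the original file. Below is a hedged, simplified sketch of how such a class can be declared on top of DecoupledWeightDecayExtension (assumed in scope from the snippets above); the real tf.contrib.opt.MomentumWOptimizer exposes additional constructor arguments such as use_locking and use_nesterov and may differ in defaults.
```python
import tensorflow as tf


class MomentumWOptimizer(DecoupledWeightDecayExtension,
                         tf.train.MomentumOptimizer):
  """Momentum optimizer with decoupled weight decay (simplified sketch)."""

  def __init__(self, weight_decay, learning_rate, momentum, name="MomentumW"):
    super(MomentumWOptimizer, self).__init__(
        weight_decay, learning_rate=learning_rate, momentum=momentum,
        name=name)


# Usage mirrors the factory-built classes, e.g.:
# opt = MomentumWOptimizer(weight_decay=1e-4, learning_rate=0.01, momentum=0.9)
# train_op = opt.minimize(loss, decay_var_list=[w])
```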
Example 12: extend_with_decoupled_weight_decay
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and additionally
    decays the variable with the weight decay being decoupled from the
    optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay
# @tf_export("contrib.opt.MomentumWOptimizer")
Example 13: extend_with_decoupled_weight_decay
# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import minimize [as alias]
def extend_with_decoupled_weight_decay(base_optimizer):
  """Factory function returning an optimizer class with decoupled weight decay.

  Returns an optimizer class. An instance of the returned class computes the
  update step of `base_optimizer` and additionally decays the weights.
  E.g., the class returned by
  `extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)` is equivalent to
  `tf.contrib.opt.AdamWOptimizer`.

  The API of the new optimizer class slightly differs from the API of the
  base optimizer:
  - The first argument to the constructor is the weight decay rate.
  - `minimize` and `apply_gradients` accept the optional keyword argument
    `decay_var_list`, which specifies the variables that should be decayed.
    If `None`, all variables that are optimized are decayed.

  Usage example:
  ```python
  # MyAdamW is a new class
  MyAdamW = extend_with_decoupled_weight_decay(tf.train.AdamOptimizer)
  # Create a MyAdamW object
  optimizer = MyAdamW(weight_decay=0.001, learning_rate=0.001)
  sess.run(optimizer.minimize(loss, decay_var_list=[var1, var2]))
  ```

  Note that this extension decays weights BEFORE applying the update based
  on the gradient, i.e. this extension only has the desired behaviour for
  optimizers which do not depend on the value of 'var' in the update step!

  Args:
    base_optimizer: An optimizer class that inherits from tf.train.Optimizer.

  Returns:
    A new optimizer class that inherits from DecoupledWeightDecayExtension
    and base_optimizer.
  """

  class OptimizerWithDecoupledWeightDecay(DecoupledWeightDecayExtension,
                                          base_optimizer):
    """Base_optimizer with decoupled weight decay.

    This class computes the update step of `base_optimizer` and additionally
    decays the variable with the weight decay being decoupled from the
    optimization steps w.r.t. the loss function, as described by
    Loshchilov & Hutter (https://arxiv.org/pdf/1711.05101.pdf).
    For SGD variants, this simplifies hyperparameter search since
    it decouples the settings of weight decay and learning rate.
    For adaptive gradient algorithms, it regularizes variables with large
    gradients more than L2 regularization would, which was shown to yield
    better training loss and generalization error in the paper above.
    """

    def __init__(self, weight_decay, *args, **kwargs):
      # super delegation is necessary here
      # pylint: disable=useless-super-delegation
      super(OptimizerWithDecoupledWeightDecay, self).__init__(
          weight_decay, *args, **kwargs)
      # pylint: enable=useless-super-delegation

  return OptimizerWithDecoupledWeightDecay