

Python optimizer.Optimizer Code Examples

This article collects typical usage examples of the Python class tensorflow.python.training.optimizer.Optimizer. If you are wondering what optimizer.Optimizer does, how to use it, or what working code with it looks like, the curated examples below may help. You can also explore further usage examples from its containing module, tensorflow.python.training.optimizer.


The following presents 11 code examples of optimizer.Optimizer, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: __init__

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name='Composite'):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True, use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Developer: ringringyi, Project: DOTA_models, Lines of code: 24, Source file: composite_optimizer.py
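
A minimal usage sketch, assuming CompositeOptimizer is importable from the project's composite_optimizer.py (the import path, variable, and loss below are illustrative stand-ins):

import tensorflow as tf
from composite_optimizer import CompositeOptimizer  # hypothetical import path

var = tf.Variable(1.0)
loss = tf.square(var)  # stand-in loss
global_step = tf.train.get_or_create_global_step()
# Use Momentum for the first 1000 steps, then switch to Adam.
switch = tf.less(global_step, 1000)
opt = CompositeOptimizer(
    tf.train.MomentumOptimizer(0.01, momentum=0.9),
    tf.train.AdamOptimizer(0.001),
    switch)
train_op = opt.minimize(loss, global_step=global_step)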

Example 2: __init__

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def __init__(self,
               opt,
               staleness,
               use_locking=False,
               name="DropStaleGradient"):
    """Constructs a new DropStaleGradientOptimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
           gradients. Must be one of the Optimizer classes.
      staleness: The maximum staleness allowed for the optimizer.
      use_locking: If `True`, use locks for the gradient update operations.
      name: Optional name prefix for the operations created when applying
            gradients. Defaults to "DropStaleGradient".
    """
    super(DropStaleGradientOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    self._staleness = staleness 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source file: drop_stale_gradient_optimizer.py
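
A minimal usage sketch, assuming the TF 1.x contrib export of this class (tf.contrib.opt.DropStaleGradientOptimizer); the variable and loss are stand-ins, and in practice this wrapper is used with between-graph replicated training:

import tensorflow as tf

var = tf.Variable(1.0)
loss = tf.square(var)  # stand-in loss
global_step = tf.train.get_or_create_global_step()
base_opt = tf.train.GradientDescentOptimizer(0.1)
# Discard gradients computed more than 10 steps behind the global step.
opt = tf.contrib.opt.DropStaleGradientOptimizer(base_opt, staleness=10)
train_op = opt.minimize(loss, global_step=global_step)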

Example 3: __init__

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def __init__(self, opt, average_decay=0.9999, num_updates=None,
               sequential_update=True):
    """Construct a new MovingAverageOptimizer.

    Args:
      opt: A tf.Optimizer that will be used to compute and apply gradients.
      average_decay: Float.  Decay to use to maintain the moving averages
                     of trained variables.
                     See tf.train.ExponentialMovingAverage for details.
      num_updates: Optional count of number of updates applied to variables.
                   See tf.train.ExponentialMovingAverage for details.
      sequential_update: Bool. If False, the moving average is computed at the
                         same time as the model is updated, potentially
                         incurring benign data races.
                         If True, the moving average is updated after the
                         gradient updates.
    """
    self._optimizer = opt
    self._ema = moving_averages.ExponentialMovingAverage(
        average_decay, num_updates=num_updates)
    self._variable_map = None
    self._sequential_update = sequential_update 
Developer: ryfeus, Project: lambda-packs, Lines of code: 24, Source file: moving_average_optimizer.py
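
A minimal usage sketch, assuming the TF 1.x contrib export (tf.contrib.opt.MovingAverageOptimizer); the swapping saver checkpoints the averaged weights in place of the raw ones:

import tensorflow as tf

var = tf.Variable(1.0)
loss = tf.square(var)  # stand-in loss
opt = tf.contrib.opt.MovingAverageOptimizer(
    tf.train.GradientDescentOptimizer(0.1), average_decay=0.999)
train_op = opt.minimize(loss)
saver = opt.swapping_saver()  # saves moving averages instead of raw variables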

Example 4: __init__

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def __init__(self,
               optimizer1,
               optimizer2,
               switch,
               use_locking=False,
               name="Composite"):
    """Construct a new Composite optimizer.

    Args:
      optimizer1: A tf.python.training.optimizer.Optimizer object.
      optimizer2: A tf.python.training.optimizer.Optimizer object.
      switch: A tf.bool Tensor, selecting whether to use the first or the second
        optimizer.
      use_locking: Bool. If True, use locks to prevent concurrent updates
        to variables.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Composite".
    """
    super(CompositeOptimizer, self).__init__(use_locking, name)
    self._optimizer1 = optimizer1
    self._optimizer2 = optimizer2
    self._switch = switch 
Developer: rky0930, Project: yolo_v2, Lines of code: 24, Source file: composite_optimizer.py

Example 5: validate_trainop_names

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def validate_trainop_names(self):
        """ Give names to all TrainOp, handle no names and duplicated names """
        t_len = len(self.train_ops)
        # Rename optimizers without name
        for i in range(t_len):
            if not self.train_ops[i].name:
                self.train_ops[i].name = 'Optimizer'
                self.train_ops[i].scope_name = 'Optimizer'
        # Handle duplicate names
        for i in range(t_len):
            dupl = 0
            for j in range(i+1, t_len):
                if not self.train_ops[i].name:
                    break
                if self.train_ops[i].name == self.train_ops[j].name:
                    if dupl == 0:
                        self.train_ops[i].name += '_' + str(dupl)
                        self.train_ops[i].scope_name = self.train_ops[i].name
                    dupl += 1
                    self.train_ops[j].name += '_' + str(dupl)
                    self.train_ops[j].scope_name = self.train_ops[j].name 
Developer: limbo018, Project: FRU, Lines of code: 23, Source file: trainer.py
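
To make the renaming rules concrete, here is a condensed standalone sketch; the _FakeTrainOp class is a hypothetical stand-in for the project's TrainOp:

class _FakeTrainOp(object):
    def __init__(self, name=None):
        self.name = name
        self.scope_name = name

def dedup_names(train_ops):
    # Condensed version of the rules in validate_trainop_names above.
    for op in train_ops:
        if not op.name:
            op.name = op.scope_name = 'Optimizer'
    for i in range(len(train_ops)):
        dupl = 0
        for j in range(i + 1, len(train_ops)):
            if train_ops[i].name == train_ops[j].name:
                if dupl == 0:
                    train_ops[i].name += '_' + str(dupl)
                    train_ops[i].scope_name = train_ops[i].name
                dupl += 1
                train_ops[j].name += '_' + str(dupl)
                train_ops[j].scope_name = train_ops[j].name

ops = [_FakeTrainOp('Adam'), _FakeTrainOp('Adam'), _FakeTrainOp(None)]
dedup_names(ops)
print([op.name for op in ops])  # ['Adam_0', 'Adam_1', 'Optimizer']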

Example 6: get_slot

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def get_slot(self, *args, **kwargs):
    """Return a slot named "name" created for "var" by the Optimizer.

    This simply wraps the get_slot() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    return self._opt.get_slot(*args, **kwargs) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 15, Source file: sync_replicas_optimizer.py
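
For context, a hedged sketch of the wrapper in use: tf.train.SyncReplicasOptimizer delegates get_slot() to the wrapped Adam optimizer (the variable and loss are stand-ins):

import tensorflow as tf

var = tf.Variable(1.0)
loss = tf.square(var)  # stand-in loss
global_step = tf.train.get_or_create_global_step()
opt = tf.train.SyncReplicasOptimizer(
    tf.train.AdamOptimizer(0.001), replicas_to_aggregate=2)
train_op = opt.minimize(loss, global_step=global_step)  # creates the slots
m_slot = opt.get_slot(var, 'm')  # Adam's first-moment slot, or None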

Example 7: get_slot_names

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot_names().
      **kwargs: Keyword arguments for get_slot_names().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 15, Source file: sync_replicas_optimizer.py
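
The wrapped optimizer is what actually creates and names the slots; a plain-optimizer sketch makes the returned list concrete (the variable and loss are stand-ins):

import tensorflow as tf

var = tf.Variable(1.0)
opt = tf.train.MomentumOptimizer(0.01, momentum=0.9)
train_op = opt.minimize(tf.square(var))  # creates the 'momentum' slot
print(opt.get_slot_names())              # ['momentum']
print(opt.get_slot(var, 'momentum'))     # the slot Variable itself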

Example 8: set_weights

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: A list of Numpy arrays. The number of arrays and their
            shapes must match the number and shapes of the optimizer's
            weights (i.e. it should match the output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError('Optimizer weight shape ' + str(pv.shape) +
                         ' not compatible with '
                         'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 28, Source file: optimizers.py
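
A hedged usage sketch: set_weights() requires the optimizer to have created its weights first, so the tiny model below runs one training step before resetting the optimizer state (the model shape and data are illustrative):

import numpy as np
import tensorflow as tf

model = tf.keras.models.Sequential(
    [tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
model.fit(np.ones((8, 4)), np.ones((8, 1)), epochs=1, verbose=0)

opt = model.optimizer
weights = opt.get_weights()                           # list of Numpy arrays
opt.set_weights([np.zeros_like(w) for w in weights])  # shapes must match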

Example 9: deserialize

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping
          names (strings) to custom objects
          (classes and functions)
          to be considered during deserialization.

  Returns:
      A Keras Optimizer instance.
  """
  all_classes = {
      'sgd': SGD,
      'rmsprop': RMSprop,
      'adagrad': Adagrad,
      'adadelta': Adadelta,
      'adam': Adam,
      'adamax': Adamax,
      'nadam': Nadam,
      'tfoptimizer': TFOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer') 
Developer: ryfeus, Project: lambda-packs, Lines of code: 33, Source file: optimizers.py
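
A minimal round-trip sketch. The module path is version-dependent; recent builds expose these helpers as tf.keras.optimizers.serialize/deserialize, and the 'lr' config key matches the era of the code above:

import tensorflow as tf

config = {'class_name': 'sgd',  # lookup is case-insensitive for built-ins
          'config': {'lr': 0.01, 'momentum': 0.9}}
opt = tf.keras.optimizers.deserialize(config)   # -> an SGD instance
roundtrip = tf.keras.optimizers.serialize(opt)  # back to a config dict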

Example 10: __init__

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def __init__(self,
               opt,
               vars_to_clip_dims,
               max_norm,
               use_locking=False,
               colocate_clip_ops_with_vars=False,
               name="VariableClipping"):
    """Construct a new clip-norm optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      vars_to_clip_dims: A dict with keys as Variables and values as lists
        of dimensions along which to compute the L2-norm.  See
        `tf.clip_by_norm` for more details.
      max_norm: The L2-norm to clip to, for all variables specified.
      use_locking: If `True`, use locks for clip update operations.
      colocate_clip_ops_with_vars: If `True`, try colocating the clip norm
        ops with the corresponding variable.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "VariableClipping".
    """
    super(VariableClippingOptimizer, self).__init__(use_locking, name)
    self._opt = opt
    # Defensive copy of input dict
    self._vars_to_clip_dims = {
        var: clip_dims[:] for var, clip_dims in vars_to_clip_dims.items()}
    self._max_norm = max_norm
    self._colocate_clip_ops_with_vars = colocate_clip_ops_with_vars 
Developer: ryfeus, Project: lambda-packs, Lines of code: 31, Source file: variable_clipping_optimizer.py
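
A minimal usage sketch, assuming the TF 1.x contrib export (tf.contrib.opt.VariableClippingOptimizer); the variable and loss are stand-ins:

import tensorflow as tf

var = tf.Variable(tf.ones([10, 20]))
loss = tf.reduce_sum(tf.square(var))  # stand-in loss
opt = tf.contrib.opt.VariableClippingOptimizer(
    tf.train.GradientDescentOptimizer(0.1),
    vars_to_clip_dims={var: [1]},  # clip the L2-norm along dimension 1
    max_norm=5.0)
train_op = opt.minimize(loss)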

Example 11: apply_gradients

# Required import: from tensorflow.python.training import optimizer [as alias]
# Or: from tensorflow.python.training.optimizer import Optimizer [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Wraps the original apply_gradient of the optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Defaults to the
        name passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.
    """
    pre_op = self._before_apply_gradients(grads_and_vars)
    with ops.control_dependencies([pre_op]):
      optimizer_update = self._optimizer.apply_gradients(
          grads_and_vars, global_step=global_step, name=name)
    # We fetch the default global step only after calling apply_gradients(),
    # to preserve the optimizer's original behavior: don't increment anything
    # if no global_step is passed. But the mask update below still needs a
    # global step.
    global_step = (global_step if global_step is not None
                   else training_util.get_or_create_global_step())
    self._global_step = global_step
    with ops.control_dependencies([optimizer_update]):
      return self.cond_mask_update_op(global_step, control_flow_ops.no_op) 
Developer: google-research, Project: rigl, Lines of code: 29, Source file: sparse_optimizers.py
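
The ordering pattern above (pre-op, then the inner update, then a conditional follow-up) can be sketched with plain TF 1.x ops; everything below is a hypothetical stand-in, not the rigl implementation:

import tensorflow as tf

var = tf.Variable(1.0)
loss = tf.square(var)  # stand-in loss
global_step = tf.train.get_or_create_global_step()
counter = tf.Variable(0, dtype=tf.int64)  # stand-in for the mask state

inner = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = inner.compute_gradients(loss)

pre_op = tf.no_op(name='before_apply_gradients')  # stand-in pre-update hook
with tf.control_dependencies([pre_op]):
  update = inner.apply_gradients(grads_and_vars, global_step=global_step)

def _do_update():
  # Stand-in for the mask update; runs only on selected steps.
  with tf.control_dependencies([tf.assign_add(counter, 1)]):
    return tf.constant(True)

with tf.control_dependencies([update]):
  # Mirror cond_mask_update_op: run the follow-up every 100 steps.
  train_op = tf.cond(tf.equal(global_step % 100, 0),
                     _do_update, lambda: tf.constant(False))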


Note: The tensorflow.python.training.optimizer.Optimizer examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not republish without permission.