

Python state_ops.assign Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.state_ops.assign method in Python. If you are wondering what state_ops.assign does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.state_ops.


The 15 code examples of state_ops.assign below are ordered by popularity by default.
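
Before working through the examples, here is a minimal sketch of what state_ops.assign does (assuming TensorFlow 1.x graph mode; state_ops.assign is the internal op behind the public tf.assign):

import tensorflow as tf
from tensorflow.python.ops import state_ops

v = tf.Variable(0.0)
assign_op = state_ops.assign(v, 42.0)  # returns a tensor holding the new value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(assign_op))  # 42.0
    print(sess.run(v))          # 42.0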

Example 1: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: import tensorflow as tf
# and: from tensorflow.python.ops import math_ops, control_flow_ops
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        # the following update equations are given in [1]
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_t = state_ops.assign(v, beta2_t * v + (1. - beta2_t) * tf.square(grad), use_locking=self._use_locking)
        v_prime = self.get_slot(var, "v_prime")
        v_t_prime = state_ops.assign(v_prime, tf.maximum(v_prime, v_t))

        var_update = state_ops.assign_sub(var,
                                          lr_t * m_t / (tf.sqrt(v_t_prime) + epsilon_t),
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t, v_t_prime])

    # keras Nadam update rule 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 26, Source file: optimizer.py
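
The state_ops.assign to v_prime in Example 1 keeps a running maximum of the second moment, the AMSGrad-style trick that prevents the effective step size from growing between iterations. A small illustrative numpy sketch of that recurrence (not taken from the project):

import numpy as np

v, v_prime, beta2 = 0.0, 0.0, 0.999
for g in [1.0, 0.1, 0.1]:
    v = beta2 * v + (1. - beta2) * g * g  # v_t, as in the code above
    v_prime = max(v_prime, v)             # state_ops.assign(v_prime, maximum(...))
    print(v, v_prime)                     # v_prime never decreases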

Example 2: _finish

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# and: from tensorflow.python.ops import control_flow_ops
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._iterations):
                update_beta1 = self._beta1_power.assign(
                    self._beta1_power * self._beta1_t,
                    use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                    self._beta2_power * self._beta2_t,
                    use_locking=self._use_locking)
                t = self._iterations + 1.
                update_iterations = self._iterations.assign(t, use_locking=self._use_locking)
                momentum_cache_power = self._get_momentum_cache(self._schedule_decay_t, t)
                momentum_cache_t = self._beta1_t * (1. - 0.5 * momentum_cache_power)
                update_m_schedule = self._m_schedule.assign(
                    self._m_schedule * momentum_cache_t,
                    use_locking=self._use_locking)
        return control_flow_ops.group(
            *update_ops + [update_beta1, update_beta2] + [update_iterations, update_m_schedule],
            name=name_scope) 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 22, Source file: optimizer.py
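
The momentum cache in Example 2 mirrors the Keras Nadam schedule. A hedged numpy reconstruction of what _get_momentum_cache presumably computes (Keras Nadam uses 0.96 ** (t * schedule_decay); treat the exact form as an assumption, since the helper is not shown in this snippet):

import numpy as np

def momentum_cache_power(schedule_decay, t):
    # hypothetical reconstruction of self._get_momentum_cache
    return np.power(0.96, t * schedule_decay)

beta1, schedule_decay, m_schedule = 0.9, 0.004, 1.0
for t in range(1, 4):
    momentum_cache_t = beta1 * (1. - 0.5 * momentum_cache_power(schedule_decay, t))
    m_schedule *= momentum_cache_t  # the running product assigned to _m_schedule
    print(t, m_schedule)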

Example 3: _update_t_cur_eta_t_v2

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# (_compute_eta_t is a helper defined elsewhere in utils.py)
def _update_t_cur_eta_t_v2(self, lr_t=None, var=None):  # tf.keras
    t_cur_update, eta_t_update = None, None  # in case not assigned

    # update `t_cur` if iterating last `(grad, var)`
    iteration_done = self._updates_processed == (self._updates_per_iter - 1)
    if iteration_done:
        t_cur_update = state_ops.assign_add(self.t_cur, 1,
                                            use_locking=self._use_locking)
        self._updates_processed = 0  # reset
    else:
        self._updates_processed += 1

    # Cosine annealing
    if self.use_cosine_annealing and iteration_done:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([t_cur_update]):
            eta_t_update = state_ops.assign(self.eta_t, _compute_eta_t(self),
                                            use_locking=self._use_locking)
        self.lr_t = lr_t * self.eta_t  # for external tracking

    return iteration_done, t_cur_update, eta_t_update 
Author: OverLordGoldDragon, Project: keras-adamw, Lines: 23, Source file: utils.py

Example 4: value

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
def value(self):
    """Returns the last snapshot of this variable.

    You usually do not need to call this method as all ops that need the value
    of the variable call it automatically through a `convert_to_tensor()` call.

    Returns a `Tensor` which holds the value of the variable.  You cannot
    assign a new value to this tensor as it is not a reference to the variable.

    To avoid copies, if the consumer of the returned value is on the same device
    as the variable, this actually returns the live value of the variable, not
    a copy.  Updates to the variable are seen by the consumer.  If the consumer
    is on a different device it will get a copy of the variable.

    Returns:
      A `Tensor` containing the value of the variable.
    """
    return self._snapshot 
Author: ryfeus, Project: lambda-packs, Lines: 20, Source file: variables.py
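
A short sketch of the snapshot semantics the docstring describes (TF1 reference variables): the returned tensor cannot be assigned to, but re-evaluating it reflects later updates when the consumer sits on the same device as the variable:

import tensorflow as tf

v = tf.Variable(3.0)
snapshot = v.value()  # a Tensor, not a reference to the variable

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(snapshot))  # 3.0
    sess.run(v.assign(5.0))
    print(sess.run(snapshot))  # 5.0 -- the live value on the same device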

Example 5: _prepare_gramian

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# and: from tensorflow.python.ops import math_ops
def _prepare_gramian(self, factors, gramian):
    """Helper function to create ops to prepare/calculate gramian.

    Args:
      factors: Variable or list of Variable representing (sharded) factors.
        Used to compute the updated corresponding gramian value.
      gramian: Variable storing the gramian calculated from the factors.

    Returns:
      An op that updates the gramian with the value calculated from the factors.
    """
    partial_gramians = []
    for f in factors:
      with ops.colocate_with(f):
        partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))

    with ops.colocate_with(gramian):
      prep_gramian = state_ops.assign(gramian,
                                      math_ops.add_n(partial_gramians)).op

    return prep_gramian 
Author: ryfeus, Project: lambda-packs, Lines: 23, Source file: factorization_ops.py
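
The gramian of a factor matrix F is F^T F. A minimal self-contained sketch of the same computation for a single unsharded factor (illustrative only):

import tensorflow as tf
from tensorflow.python.ops import math_ops, state_ops

f = tf.Variable([[1., 2.], [3., 4.]])    # factor matrix F
gramian = tf.Variable(tf.zeros([2, 2]))  # will hold F^T F
prep_gramian = state_ops.assign(gramian,
                                math_ops.matmul(f, f, transpose_a=True)).op

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(prep_gramian)
    print(sess.run(gramian))  # [[10. 14.] [14. 20.]]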

Example 6: scatter_update

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from six.moves import xrange
# and: from tensorflow.python.framework import dtypes, ops
# and: from tensorflow.python.ops import control_flow_ops, data_flow_ops, math_ops
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return state_ops.scatter_update(factor[0], indices, values,
                                        name=name).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = math_ops.cast(assignments, dtypes.int32)
      sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
                                                    num_shards)
      sharded_values = data_flow_ops.dynamic_partition(values, assignments,
                                                       num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(state_ops.scatter_update(factor[i], sharded_ids[i],
                                                sharded_values[i]))
      return control_flow_ops.group(*updates, name=name) 
Author: ryfeus, Project: lambda-packs, Lines: 24, Source file: factorization_ops.py
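
The sharded branch above relies on dynamic_partition to route each row to its shard. A minimal sketch of that routing step in isolation, with a parity-based sharding function standing in for sharding_func (an illustrative choice, not the project's):

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

ids = tf.constant([0, 1, 2, 3, 4, 5])
assignments = ids % 2   # shard index per id (even -> shard 0, odd -> shard 1)
new_ids = ids // 2      # position within the shard
sharded = data_flow_ops.dynamic_partition(new_ids, assignments, 2)

with tf.Session() as sess:
    print(sess.run(sharded))  # [array([0, 1, 2]), array([0, 1, 2])]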

Example 7: batch_set_value

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: import numpy as np
# and: from tensorflow.python.ops import array_ops
# (_convert_string_dtype and get_session are helpers defined elsewhere in backend.py)
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Arguments:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if tuples:
    assign_ops = []
    feed_dict = {}
    for x, value in tuples:
      value = np.asarray(value)
      tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      assign_ops.append(assign_op)
      feed_dict[assign_placeholder] = value
    get_session().run(assign_ops, feed_dict=feed_dict) 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source file: backend.py
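
A hedged usage sketch of batch_set_value via the TF1-era tf.keras backend, which exposes the function shown above:

import numpy as np
from tensorflow.python.keras import backend as K

a = K.variable(np.zeros((2, 2)))
b = K.variable(np.zeros(3))
K.batch_set_value([(a, np.ones((2, 2))), (b, np.arange(3.))])
print(K.get_value(a))  # [[1. 1.] [1. 1.]]
print(K.get_value(b))  # [0. 1. 2.]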

Example 8: __init__

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# and: from tensorflow.python.ops import control_flow_ops
def __init__(self, inputs, outputs, updates=None):
    updates = updates or []
    if not isinstance(inputs, (list, tuple)):
      raise TypeError('`inputs` to a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(outputs, (list, tuple)):
      raise TypeError('`outputs` of a TensorFlow backend function '
                      'should be a list or tuple.')
    if not isinstance(updates, (list, tuple)):
      raise TypeError('`updates` in a TensorFlow backend function '
                      'should be a list or tuple.')
    self.inputs = list(inputs)
    self.outputs = list(outputs)
    with ops.control_dependencies(self.outputs):
      updates_ops = []
      for update in updates:
        if isinstance(update, tuple):
          p, new_p = update
          updates_ops.append(state_ops.assign(p, new_p))
        else:
          # assumed already an op
          updates_ops.append(update)
      self.updates_op = control_flow_ops.group(*updates_ops) 
Author: ryfeus, Project: lambda-packs, Lines: 25, Source file: backend.py
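
A hedged usage sketch of this Function class through the backend's public wrapper (TF1-era tf.keras backend); note how a (p, new_p) tuple in updates becomes a state_ops.assign, exactly as in the loop above:

import numpy as np
from tensorflow.python.keras import backend as K

x = K.placeholder(shape=(None,))
counter = K.variable(0.)
f = K.function([x], [x * 2.], updates=[(counter, counter + 1.)])
print(f([np.array([1., 2.])]))  # [array([2., 4.], dtype=float32)]
print(K.get_value(counter))     # 1.0 -- incremented by the assign op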

Example 9: record_variable_inits

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.ops import variables
# (in the full source this method is presumably decorated with @contextlib.contextmanager)
def record_variable_inits(self):
    """Context manager to record Variable initializations.

    Sets _in_variable_creation to True before a Variable is initialized.

    NOTE(keveman): This is used for recording the list of assign ops
    that are used to initialize variables. It relies on the fact that
    the constructor of Variable class creates exactly one assign op that is
    used for initializing the variable. Variable ops not created using the
    variables.Variable class are not added to _init_ops and hence not
    initialized automatically.

    """
    old_init = getattr(variables.Variable, '__init__')

    def record(*args, **kwargs):
      self._in_variable_creation = True
      old_init(*args, **kwargs)
      self._in_variable_creation = False

    setattr(variables.Variable, '__init__', record)
    yield
    setattr(variables.Variable, '__init__', old_init)
  # pylint: enable=g-doc-return-or-yield 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source file: imperative_graph.py

Example 10: scatter_update

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from six.moves import xrange
# and: from tensorflow.python.framework import dtypes, ops
# and: from tensorflow.python.ops import control_flow_ops, data_flow_ops, math_ops
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return state_ops.scatter_update(factor[0], indices, values).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = math_ops.cast(assignments, dtypes.int32)
      sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
                                                    num_shards)
      sharded_values = data_flow_ops.dynamic_partition(values, assignments,
                                                       num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(
            state_ops.scatter_update(factor[i], sharded_ids[i],
                                     sharded_values[i]))
      return control_flow_ops.group(*updates) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source file: factorization_ops.py

Example 11: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.ops import math_ops, control_flow_ops
def _apply_dense(self, grad, var):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values,
                               use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values,
                               use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 32, Source file: AMSGrad.py
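
A hypothetical end-to-end sketch of driving an optimizer built from these methods (the class name AMSGrad is an assumption based on the source file name; the full class definition is not shown here):

import tensorflow as tf

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
opt = AMSGrad(learning_rate=0.01)  # hypothetical: the class defined in AMSGrad.py
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(loss))  # decreases steadily toward 0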

Example 12: _resource_apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.ops import math_ops, control_flow_ops
def _resource_apply_dense(self, grad, var):
        var = var.handle
        beta1_power = math_ops.cast(self._beta1_power, grad.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, grad.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m").handle
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values,
                               use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v").handle
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values,
                               use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat").handle
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 33, Source file: AMSGrad.py

Example 13: _apply_sparse_shared

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# and: from tensorflow.python.ops import math_ops, control_flow_ops
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 33, Source file: AMSGrad.py
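
The two-step pattern above (assign the decayed slot, then scatter_add into the touched rows) matches the dense moment update restricted to the gradient's indices. A small numpy illustration (not from the project):

import numpy as np

m = np.array([1.0, 1.0, 1.0])              # slot m
grad = np.array([0.5, 0.25])               # sparse gradient values
indices = np.array([0, 2])                 # rows the gradient touches
beta1 = 0.9

m *= beta1                                 # state_ops.assign(m, m * beta1_t)
np.add.at(m, indices, grad * (1 - beta1))  # scatter_add(m, indices, m_scaled_g_values)
print(m)  # [0.95  0.9   0.925]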

Example 14: _finish

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# and: from tensorflow.python.ops import control_flow_ops
def _finish(self, update_ops, name_scope):
        # Update the power accumulators.
        with ops.control_dependencies(update_ops):
            with ops.colocate_with(self._beta1_power):
                update_beta1 = self._beta1_power.assign(
                    self._beta1_power * self._beta1_t,
                    use_locking=self._use_locking)
                update_beta2 = self._beta2_power.assign(
                    self._beta2_power * self._beta2_t,
                    use_locking=self._use_locking)
        return control_flow_ops.group(*update_ops + [update_beta1, update_beta2],
                                      name=name_scope) 
Author: imsb-uke, Project: scGAN, Lines: 14, Source file: AMSGrad.py

Example 15: _update_t_cur_eta_t

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign [as alias]
# This snippet also needs: from tensorflow.python.framework import ops
# (_compute_eta_t is a helper defined elsewhere in utils.py)
def _update_t_cur_eta_t(self):  # keras
    self.updates.append(state_ops.assign_add(self.t_cur, 1))
    # Cosine annealing
    if self.use_cosine_annealing:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([self.updates[-1]]):
            self.updates.append(state_ops.assign(self.eta_t,
                                                 _compute_eta_t(self))) 
Author: OverLordGoldDragon, Project: keras-adamw, Lines: 10, Source file: utils.py
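
Both keras-adamw helpers (Examples 3 and 15) scale the base learning rate by eta_t. A hedged numpy reconstruction of what _compute_eta_t presumably computes, SGDR-style cosine annealing; treat the exact form as an assumption, since the helper is not shown here:

import numpy as np

def compute_eta_t(eta_min, eta_max, t_cur, total_iterations):
    # hypothetical reconstruction of _compute_eta_t
    frac = t_cur / total_iterations
    return eta_min + 0.5 * (eta_max - eta_min) * (1 + np.cos(np.pi * frac))

print(compute_eta_t(0., 1., 0, 100))    # 1.0 at the start of a cycle
print(compute_eta_t(0., 1., 50, 100))   # 0.5 mid-cycle
print(compute_eta_t(0., 1., 100, 100))  # 0.0 at the end of a cycle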


Note: the tensorflow.python.ops.state_ops.assign examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Refer to each project's license before redistributing or using the code. Do not repost without permission.