

Python state_ops.assign_sub Code Examples

This article collects typical usage examples of the state_ops.assign_sub method from tensorflow.python.ops in Python. If you are wondering what exactly state_ops.assign_sub does and how to use it, the curated code examples below may help. You can also browse further usage examples of the containing module, tensorflow.python.ops.state_ops.


Below are 15 code examples of state_ops.assign_sub, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
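Before diving into the examples, here is a minimal, self-contained sketch of the op itself (my own illustration, assuming TensorFlow 1.x graph mode, which all of the examples below target): state_ops.assign_sub(ref, value) subtracts value from the variable ref in place and returns a tensor holding the updated value.

import tensorflow as tf
from tensorflow.python.ops import state_ops

var = tf.Variable(10.0)
decrement = state_ops.assign_sub(var, 3.0)  # var := var - 3.0

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(decrement))  # prints 7.0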

Example 1: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        # the update equations below follow [1]
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, beta1_t * m + (1. - beta1_t) * grad, use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_t = state_ops.assign(v, beta2_t * v + (1. - beta2_t) * tf.square(grad), use_locking=self._use_locking)
        v_prime = self.get_slot(var, "v_prime")
        v_t_prime = state_ops.assign(v_prime, tf.maximum(v_prime, v_t))

        var_update = state_ops.assign_sub(var,
                                          lr_t * m_t / (tf.sqrt(v_t_prime) + epsilon_t),
                                          use_locking=self._use_locking)

        return control_flow_ops.group(*[var_update, m_t, v_t, v_t_prime])

    # keras Nadam update rule 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 26, Source: optimizer.py
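Most of the examples on this page are _apply_dense / _apply_sparse_shared methods cut out of tf.train.Optimizer subclasses. For orientation, here is a hypothetical minimal skeleton (the class name MyMomentumLike and its hyperparameters are made up for illustration) showing where such a method lives and where the slots it reads via get_slot come from:

import tensorflow as tf
from tensorflow.python.ops import math_ops, state_ops
from tensorflow.python.training import optimizer

class MyMomentumLike(optimizer.Optimizer):  # hypothetical optimizer
    def __init__(self, learning_rate=0.001, use_locking=False, name="MyMomentumLike"):
        super(MyMomentumLike, self).__init__(use_locking, name)
        self._lr = learning_rate

    def _create_slots(self, var_list):
        # One "m" slot per variable; _apply_dense reads it back with
        # self.get_slot(var, "m").
        for v in var_list:
            self._zeros_slot(v, "m", self._name)

    def _prepare(self):
        self._lr_t = tf.convert_to_tensor(self._lr, name="learning_rate")

    def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        m = self.get_slot(var, "m")
        m_t = state_ops.assign(m, 0.9 * m + 0.1 * grad,
                               use_locking=self._use_locking)
        # The actual parameter update: var := var - lr * m_t.
        return state_ops.assign_sub(var, lr_t * m_t,
                                    use_locking=self._use_locking)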

Example 2: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    if var.dtype.base_dtype == tf.float16:
        eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
    else:
        eps = 1e-8

    v = self.get_slot(var, "v")
    v_t = v.assign(beta2_t * v + (1. - beta2_t) * tf.square(grad))
    m = self.get_slot(var, "m")
    m_t = m.assign(beta1_t * m + (1. - beta1_t) * grad)
    # Bias-corrected estimates (note: computed here but never used below).
    v_t_hat = tf.div(v_t, 1. - beta2_t)
    m_t_hat = tf.div(m_t, 1. - beta1_t)

    g_t = tf.div(m_t, tf.sqrt(v_t) + eps)
    g_t_1 = self.get_slot(var, "g")
    g_t = g_t_1.assign(g_t)

    # Extrapolated step: plain Adam would subtract lr_t * g_t; using
    # 2 * g_t - g_t_1 takes an "optimistic" step past the current gradient.
    var_update = state_ops.assign_sub(var, 2. * lr_t * g_t - lr_t * g_t_1)
    return control_flow_ops.group(*[var_update, m_t, v_t, g_t]) 
Author: HyperGAN, Project: HyperGAN, Lines: 24, Source: adamirror.py

Example 3: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = state_ops.assign_sub(var, lr_t * g_t)
        return control_flow_ops.group(*[var_update, m_t, v_t]) 
Author: daniellerch, Project: aletheia, Lines: 19, Source: models.py

Example 4: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7
            # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
        else:
            eps = 1e-8

        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        g_t = v_t / m_t

        var_update = state_ops.assign_sub(var, lr_t * g_t)
        return control_flow_ops.group(*[var_update, m_t, v_t]) 
Author: vanzytay, Project: EMNLP2018_NLI, Lines: 20, Source: utilities.py

Example 5: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values,
                               use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values,
                               use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 32, Source: AMSGrad.py

Example 6: _resource_apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _resource_apply_dense(self, grad, var):
        var = var.handle
        beta1_power = math_ops.cast(self._beta1_power, grad.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, grad.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m").handle
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values,
                               use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v").handle
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values,
                               use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat").handle
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 33, Source: AMSGrad.py

Example 7: _apply_sparse_shared

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: imsb-uke, Project: scGAN, Lines: 33, Source: AMSGrad.py

Example 8: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values, use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: HyperGAN, Project: HyperGAN, Lines: 29, Source: amsgrad.py

Example 9: _resource_apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _resource_apply_dense(self, grad, var):
        var = var.handle
        beta1_power = math_ops.cast(self._beta1_power, grad.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, grad.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, grad.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, grad.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, grad.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, grad.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m").handle
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, beta1_t * m + m_scaled_g_values, use_locking=self._use_locking)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v").handle
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, beta2_t * v + v_scaled_g_values, use_locking=self._use_locking)

        # amsgrad
        vhat = self.get_slot(var, "vhat").handle
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)

        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: HyperGAN, Project: HyperGAN, Lines: 30, Source: amsgrad.py

Example 10: _apply_sparse_shared

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Author: HyperGAN, Project: HyperGAN, Lines: 32, Source: amsgrad.py

Example 11: _apply_dense

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_dense(self, grad, var):
    # Calculates the preconditioner statistics for each tensor.
    partitioned_grads = TensorPartitioner.partition_tensor(
        grad, self._partition_info)
    shape = var.get_shape()
    fallback_to_diagonal = self._fallback_to_diagonal_for_shape(shape)

    precond_statistics_update = []
    if not fallback_to_diagonal:
      precond_statistics_update = self._updated_statistics(
          var, partitioned_grads)

    accumulator = self.get_slot(var, "accumulator")
    accumulator_updated = state_ops.assign_add(accumulator, grad * grad)
    accumulator_inv_sqrt = math_ops.rsqrt(accumulator_updated + 1e-30)
    if self._momentum > 0.0:
      scaled_g = (1.0 - self._momentum_tensor) * (grad * accumulator_inv_sqrt)
      gbar = self.get_slot(var, "momentum")
      gbar_updated = state_ops.assign_add(
          gbar,
          gbar * (self._momentum_tensor - 1.0) + scaled_g)
    else:
      gbar_updated = (grad * accumulator_inv_sqrt)

    if not fallback_to_diagonal:
      # Update the preconditioner statistics followed by computing the
      # preconditioned gradient.
      with ops.control_dependencies(precond_statistics_update):
        s = tf.cast(self._run_nondiagonal_update, tf.float32)
        preconditioned_grad = self._preconditioned_update(
            var, partitioned_grads, gbar_updated)
        # slowly adapt from diagonal to preconditioned gradient.
        w = self._run_nondiagonal_update_warmup
        warmup_update = s * self._learning_rate_tensor * (
            w * preconditioned_grad + (1.0 - w) * gbar_updated)
        fallback_update = (1 - s) * (self._learning_rate_tensor * gbar_updated)
        return state_ops.assign_sub(var, warmup_update + fallback_update)
    else:
      return state_ops.assign_sub(var,
                                  self._learning_rate_tensor * gbar_updated) 
Author: tensorflow, Project: lingvo, Lines: 42, Source: distributed_shampoo.py

Example 12: assign_sub

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def assign_sub(self, delta, use_locking=False):
    """Subtracts a value from this variable.

    This is essentially a shortcut for `assign_sub(self, delta)`.

    Args:
      delta: A `Tensor`. The value to subtract from this variable.
      use_locking: If `True`, use locking during the operation.

    Returns:
      A `Tensor` that will hold the new value of this variable after
      the subtraction has completed.
    """
    return state_ops.assign_sub(self._variable, delta, use_locking=use_locking) 
Author: ryfeus, Project: lambda-packs, Lines: 16, Source: variables.py
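Since Variable.assign_sub simply forwards to state_ops.assign_sub, the two spellings are interchangeable; a tiny illustrative snippet (assuming the imports from the sketch near the top of this page):

v = tf.Variable(5.0)
op1 = v.assign_sub(2.0)             # method form
op2 = state_ops.assign_sub(v, 2.0)  # functional form, same effect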

Example 13: _apply_sparse_shared

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t,
                           use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(var,
                                      lr * m_t / (v_sqrt + epsilon_t),
                                      use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t]) 
Author: ryfeus, Project: lambda-packs, Lines: 28, Source: adam.py
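For context, in the upstream adam.py this shared helper is dispatched from _apply_sparse, which binds state_ops.scatter_add as the scatter_add argument; a sketch of that wiring (reconstructed from memory of the TensorFlow source, so treat the exact signature as an assumption):

def _apply_sparse(self, grad, var):
    # grad is an IndexedSlices; bind scatter_add so the shared helper can
    # accumulate the scaled gradient values into the m/v slots at `indices`.
    return self._apply_sparse_shared(
        grad.values, var, grad.indices,
        lambda x, i, v: state_ops.scatter_add(
            x, i, v, use_locking=self._use_locking))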

Example 14: _mini_batch_sync_updates_op

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def _mini_batch_sync_updates_op(self, update_in_steps,
                                  cluster_centers_var, cluster_centers_updated,
                                  total_counts):
    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
      assert update_in_steps is not None
      with ops.colocate_with(update_in_steps):
        def _f():
          # Note that there is a race condition here, so we do a best effort
          # updates here. We reset update_in_steps first so that other workers
          # don't duplicate the updates. Also we update cluster_center_vars
          # before resetting total_counts to avoid large updates to
          # cluster_centers_updated based on partially updated
          # cluster_center_vars.
          with ops.control_dependencies([state_ops.assign(
              update_in_steps,
              self._mini_batch_steps_per_iteration - 1)]):
            with ops.colocate_with(cluster_centers_updated):
              if self._distance_metric == COSINE_DISTANCE:
                cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
                                                       dim=1)
              else:
                cluster_centers = cluster_centers_updated
            with ops.colocate_with(cluster_centers_var):
              with ops.control_dependencies([state_ops.assign(
                  cluster_centers_var,
                  cluster_centers)]):
                with ops.colocate_with(cluster_centers_var):
                  with ops.control_dependencies([
                      state_ops.assign(total_counts,
                                       array_ops.zeros_like(total_counts))]):
                    return array_ops.identity(update_in_steps)
        return control_flow_ops.cond(
            update_in_steps <= 0,
            _f,
            lambda: state_ops.assign_sub(update_in_steps, 1))
    else:
      return control_flow_ops.no_op() 
Author: ryfeus, Project: lambda-packs, Lines: 39, Source: clustering_ops.py
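Example 14's use of assign_sub is essentially a step countdown inside control_flow_ops.cond: decrement until the counter reaches zero, then run the synchronization branch and rearm. A stripped-down sketch of that pattern (variable names are my own):

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops, state_ops

steps_left = tf.Variable(5, dtype=tf.int64)

def _sync_and_rearm():
    # Stand-in for the real synchronization work; just reset the counter.
    return state_ops.assign(steps_left, 5)

maybe_sync = control_flow_ops.cond(
    steps_left <= 0,
    _sync_and_rearm,
    lambda: state_ops.assign_sub(steps_left, 1))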

Example 15: update_sub

# Required import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import assign_sub [as alias]
def update_sub(x, decrement):
  return state_ops.assign_sub(x, decrement) 
Author: ryfeus, Project: lambda-packs, Lines: 4, Source: backend.py
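A hedged sketch of a typical call site: Keras-style optimizers collect one such update op per parameter inside get_updates() (the exact backend import path varies across TF versions, so take it as an assumption):

from tensorflow.python.keras import backend as K

def sgd_updates(params, grads, lr):
    # Plain SGD for illustration: p := p - lr * g for every parameter.
    return [K.update_sub(p, lr * g) for p, g in zip(params, grads)]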


Note: the tensorflow.python.ops.state_ops.assign_sub examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.