

Python state_ops.scatter_add Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.state_ops.scatter_add method in Python. If you are wondering what state_ops.scatter_add does, or how to use it, the curated code examples below may help. You can also explore other usage examples from the containing module, tensorflow.python.ops.state_ops.


Below are 15 code examples of the state_ops.scatter_add method, sorted by popularity by default.
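
Before the examples, here is a minimal sketch of what state_ops.scatter_add does: it adds the given updates to the rows of a variable selected by indices, in place. The sketch assumes TensorFlow 1.x-style graph execution via tf.compat.v1; the variable and values are illustrative.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.ops import state_ops

var = tf.Variable([1.0, 2.0, 3.0, 4.0])
# Add the updates to rows 0 and 2 in place; duplicate indices accumulate.
updated = state_ops.scatter_add(var, indices=[0, 2], updates=[10.0, 20.0])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(updated))  # [11.  2. 23.  4.]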

Example 1: _apply_sparse_shared

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t),
                                          use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Developer: imsb-uke, Project: scGAN, Lines: 33, Source: AMSGrad.py

Example 2: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(
                # pylint: disable=g-long-lambda
                x, i, v, use_locking=self._use_locking)) 
Developer: imsb-uke, Project: scGAN, Lines: 8, Source: AMSGrad.py

Example 3: _apply_sparse_shared

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
        beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)

        lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_scaled_g_values = grad * (1 - beta1_t)
        m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
        with ops.control_dependencies([m_t]):
            m_t = scatter_add(m, indices, m_scaled_g_values)

        # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
        v = self.get_slot(var, "v")
        v_scaled_g_values = (grad * grad) * (1 - beta2_t)
        v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
        with ops.control_dependencies([v_t]):
            v_t = scatter_add(v, indices, v_scaled_g_values)

        # amsgrad
        vhat = self.get_slot(var, "vhat")
        vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
        v_sqrt = math_ops.sqrt(vhat_t)
        var_update = state_ops.assign_sub(var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
        return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) 
Developer: HyperGAN, Project: HyperGAN, Lines: 32, Source: amsgrad.py

Example 4: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values, var, grad.indices,
            lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
                x, i, v, use_locking=self._use_locking)) 
Developer: HyperGAN, Project: HyperGAN, Lines: 7, Source: amsgrad.py

Example 5: _apply_sparse_shared

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
    beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t,
                           use_locking=self._use_locking)
    with ops.control_dependencies([m_t]):
      m_t = scatter_add(m, indices, m_scaled_g_values)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad * grad) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    with ops.control_dependencies([v_t]):
      v_t = scatter_add(v, indices, v_scaled_g_values)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(var,
                                      lr * m_t / (v_sqrt + epsilon_t),
                                      use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t]) 
Developer: ryfeus, Project: lambda-packs, Lines: 28, Source: adam.py

Example 6: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
    return self._apply_sparse_shared(
        grad.values, var, grad.indices,
        lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
            x, i, v, use_locking=self._use_locking)) 
Developer: ryfeus, Project: lambda-packs, Lines: 7, Source: adam.py

Example 7: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
    beta1_power = math_ops.cast(self._beta1_power, var.dtype.base_dtype)
    beta2_power = math_ops.cast(self._beta2_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_scaled_g_values = grad.values * (1 - beta1_t)
    m_t = state_ops.assign(m, m * beta1_t,
                           use_locking=self._use_locking)
    m_t = state_ops.scatter_add(m_t, grad.indices, m_scaled_g_values,
                                use_locking=self._use_locking)
    # v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
    v = self.get_slot(var, "v")
    v_scaled_g_values = (grad.values * grad.values) * (1 - beta2_t)
    v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
    v_t = state_ops.scatter_add(v_t, grad.indices, v_scaled_g_values,
                                use_locking=self._use_locking)
    v_sqrt = math_ops.sqrt(v_t)
    var_update = state_ops.assign_sub(var,
                                      lr * m_t / (v_sqrt + epsilon_t),
                                      use_locking=self._use_locking)
    return control_flow_ops.group(*[var_update, m_t, v_t]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 28, Source: adam.py

Example 8: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
    return self._apply_sparse_shared(
        grad.values,
        var,
        grad.indices,
        lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking))
Developer: yyht, Project: BERT, Lines: 8, Source: lamb_utils.py

Example 9: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
        return self._apply_sparse_shared(
            grad.values,
            var,
            grad.indices,
            lambda x, i, v: state_ops.scatter_add(x, i, v, use_locking=self._use_locking)) 
Developer: yyht, Project: BERT, Lines: 8, Source: radam_utils.py

Example 10: _decay_weights_sparse_op

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
      update = -self._weight_decay * array_ops.gather(var, indices)
      return scatter_add(var, indices, update, self._use_locking)
    return control_flow_ops.no_op()

  # Here, we overwrite the apply functions that the base optimizer calls.
  # super().apply_x resolves to the apply_x function of the BaseOptimizer. 
Developer: yyht, Project: BERT, Lines: 10, Source: adam_weight_decay_exclude_utils.py
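
For intuition, here is a NumPy sketch (with made-up values) of the sparse decoupled weight decay step above: only the rows touched by the sparse gradient are decayed, which is why the update is gathered from var before being scattered back.

import numpy as np

weight_decay = 0.01
var = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
indices = np.array([0, 2])               # rows touched by the sparse gradient

update = -weight_decay * var[indices]    # mirrors array_ops.gather(var, indices)
np.add.at(var, indices, update)          # mirrors scatter_add(var, indices, update)
# Rows 0 and 2 shrink toward zero; row 1 is left untouched.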

Example 11: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
    scatter_add = state_ops.scatter_add
    decay_op = self._decay_weights_sparse_op(var, grad.indices, scatter_add)
    with ops.control_dependencies([decay_op]):
      return super(DecoupledWeightDecayExtension, self)._apply_sparse(
          grad, var) 
Developer: yyht, Project: BERT, Lines: 8, Source: adam_weight_decay_exclude_utils.py

Example 12: _resource_apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _resource_apply_sparse(self, grad, var, indices):
    scatter_add = self._resource_scatter_add
    decay_op = self._decay_weights_sparse_op(var, indices, scatter_add)
    with ops.control_dependencies([decay_op]):
      return super(DecoupledWeightDecayExtension, self)._resource_apply_sparse(
          grad, var, indices) 
Developer: yyht, Project: BERT, Lines: 8, Source: adam_weight_decay_exclude_utils.py

Example 13: _decay_weights_sparse_op

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _decay_weights_sparse_op(self, var, indices, scatter_add):
    if not self._decay_var_list or var in self._decay_var_list:
        # Gather only the rows named by `indices`, so the update's shape
        # matches what scatter_add expects of its updates argument
        # (compare Example 10, which does the same).
        update = -self._weight_decay * array_ops.gather(var, indices)
        return scatter_add(var, indices, update, self._use_locking)
    return control_flow_ops.no_op()

# Here, we overwrite the apply functions that the base optimizer calls.
# super().apply_x resolves to the apply_x function of the BaseOptimizer.
Developer: yyht, Project: BERT, Lines: 10, Source: adam_weight_decay_utils.py

Example 14: _apply_sparse

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _apply_sparse(self, grad, var):
    scatter_add = state_ops.scatter_add
    decay_op = self._decay_weights_sparse_op(var, grad.indices, scatter_add)
    with ops.control_dependencies([decay_op]):
        return super(DecoupledWeightDecayExtension, self)._apply_sparse(
            grad, var)
Developer: yyht, Project: BERT, Lines: 8, Source: adam_weight_decay_utils.py

Example 15: _resource_scatter_add

# Module to import: from tensorflow.python.ops import state_ops [as alias]
# Or: from tensorflow.python.ops.state_ops import scatter_add [as alias]
def _resource_scatter_add(self, x, i, v, _=None):
    # The last argument allows for one overflow argument, to have the same
    # function signature as state_ops.scatter_add.
    with ops.control_dependencies(
            [resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
        return x.value()
Developer: yyht, Project: BERT, Lines: 8, Source: adam_weight_decay_utils.py
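
A minimal usage sketch of the pattern in this example, assuming TensorFlow 1.x-style graph execution and a resource variable (the variable name and values are illustrative): resource_scatter_add returns an op with no output value, so the updated value is read back under a control dependency, exactly as the helper above does.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.python.ops import resource_variable_ops

v = tf.get_variable("v", initializer=[1.0, 2.0, 3.0], use_resource=True)
update_op = resource_variable_ops.resource_scatter_add(
    v.handle, [0, 2], [5.0, 5.0])
with tf.control_dependencies([update_op]):
    result = v.value()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(result))  # [6. 2. 8.]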


Note: The tensorflow.python.ops.state_ops.scatter_add examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs, with snippets selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.