

Python slot_creator.create_zeros_slot method code examples

This article collects typical usage examples of the tensorflow.python.training.slot_creator.create_zeros_slot method in Python. If you are looking for concrete examples of how create_zeros_slot is called in practice, the curated snippets below should help. You can also explore further usage examples from the tensorflow.python.training.slot_creator module.


The following shows 9 code examples of the slot_creator.create_zeros_slot method, sorted by popularity by default.
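Before the examples, here is a minimal, self-contained sketch of the basic call itself. It assumes a TF1-style graph/session environment (under TF2 the same code would need tf.compat.v1); the variable name and slot name are illustrative and not taken from any of the projects below:

import tensorflow as tf
from tensorflow.python.training import slot_creator

var = tf.Variable([1.0, 2.0, 3.0], name="var")
# create_zeros_slot builds a zero-initialized variable matching the shape
# (and, unless dtype is overridden, the dtype) of `var`, colocated with it.
accum = slot_creator.create_zeros_slot(var, name="accum")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(accum))  # -> [0. 0. 0.]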

Example 1: _zeros_slot

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _zeros_slot(self, var, slot_name, op_name):
    """Find or create a slot initialized with 0.0.

    Args:
      var: A `Variable` object.
      slot_name: Name for the slot.
      op_name: Name to use when scoping the Variable that
        needs to be created for the slot.

    Returns:
      A `Variable` object.
    """
    named_slots = self._slot_dict(slot_name)
    if _var_key(var) not in named_slots:
      named_slots[_var_key(var)] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[_var_key(var)] 
Author: ryfeus, Project: lambda-packs, Lines of code: 18, Source file: optimizer.py
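For context, a hedged sketch of how _zeros_slot is typically wired into a custom optimizer: _create_slots requests one zero slot per variable, and _apply_dense reads it back with get_slot. The class name, slot name, and update rule below are hypothetical and not taken from the project above:

from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer

class AccumulatingSGD(optimizer.Optimizer):  # hypothetical optimizer, for illustration only
    def __init__(self, learning_rate=0.01, use_locking=False, name="AccumulatingSGD"):
        super(AccumulatingSGD, self).__init__(use_locking, name)
        self._learning_rate = learning_rate

    def _create_slots(self, var_list):
        for v in var_list:
            # One zero-initialized accumulator per variable, keyed by slot name.
            self._zeros_slot(v, "accumulator", self._name)

    def _apply_dense(self, grad, var):
        accum = self.get_slot(var, "accumulator")
        new_accum = state_ops.assign_add(accum, grad, use_locking=self._use_locking)
        return state_ops.assign_sub(var, self._learning_rate * new_accum,
                                    use_locking=self._use_locking)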

Example 2: _zeros_slot

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _zeros_slot(self, var, slot_name, op_name):
    """Find or create a slot initialized with 0.0.

    Args:
      var: A `Variable` object.
      slot_name: Name for the slot.
      op_name: Name to use when scoping the Variable that
        needs to be created for the slot.

    Returns:
      A `Variable` object.
    """
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[var] 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 18, Source file: optimizer.py

Example 3: _zeros_slot

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _zeros_slot(self, var, slot_name, op_name):
    """Find or create a slot initialized with 0.0.

    Args:
      var: A `Variable` object.
      slot_name: Name for the slot.
      op_name: Name to use when scoping the Variable that
        needs to be created for the slot.

    Returns:
      A `Variable` object.
    """
    named_slots = self._slot_dict(slot_name)
    if _var_key(var) not in named_slots:
      named_slots[_var_key(var)] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[_var_key(var)] 
Author: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines of code: 18, Source file: optimizer.py

Example 4: _zeros_slot

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _zeros_slot(self, var, slot_name, op_name):
    named_slots = self._slot_dict(slot_name)
    if var not in named_slots:
      named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
    return named_slots[var]

  # TODO: in the native RMSProp code, memcpy() (for CPU) and
  # cudaMemcpyAsync() (for GPU) are used when updating values,
  # so values may be overwritten by results from other threads.
  # (Need to check how learning performance changes if this is replaced.)
Author: yushu-liu, Project: icra2017-visual-navigation, Lines of code: 12, Source file: rmsprop_applier.py

Example 5: testCreateZerosSlotFromVariable

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def testCreateZerosSlotFromVariable(self):
    with self.test_session():
      v = tf.Variable([1.0, 2.5], name="var")
      with tf.control_dependencies(None):
        slot = slot_creator.create_zeros_slot(v, name="slot", dtype=tf.float64)

      tf.global_variables_initializer().run()

      self.assertEqual(slot.op.name, "var/slot")
      self.assertEqual(slot.get_shape().as_list(), [2])
      self.assertEqual(slot.dtype.base_dtype, tf.float64)
      self.assertAllEqual(slot.eval(), [0.0, 0.0]) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 14, Source file: slot_creator_test.py

Example 6: testCreateZerosSlotFromTensor

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def testCreateZerosSlotFromTensor(self):
    with self.test_session():
      v = tf.constant([1.0, 2.5], name="const")
      with tf.control_dependencies(None):
        slot = slot_creator.create_zeros_slot(v, name="slot")

      tf.global_variables_initializer().run()

      self.assertEqual(slot.op.name, "const/slot")
      self.assertEqual(slot.get_shape().as_list(), [2])
      self.assertEqual(slot.dtype.base_dtype, tf.float32)
      self.assertAllEqual(slot.eval(), [0.0, 0.0]) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 14, Source file: slot_creator_test.py
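Read together, the two tests above document two behaviors: when the primary is a Variable, the slot dtype can be overridden via the dtype argument, and the slot name is scoped under the primary's op name ("var/slot" vs. "const/slot"); a slot created from a plain Tensor simply inherits the tensor's dtype. A minimal side-by-side sketch (TF1-style, illustrative names):

import tensorflow as tf
from tensorflow.python.training import slot_creator

v = tf.Variable([1.0, 2.5], name="var")
c = tf.constant([1.0, 2.5], name="const")
slot_v = slot_creator.create_zeros_slot(v, name="slot", dtype=tf.float64)  # dtype overridden, op name "var/slot"
slot_c = slot_creator.create_zeros_slot(c, name="slot")                    # inherits float32, op name "const/slot"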

Example 7: _zeros_slot

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _zeros_slot(self, var, slot_name, op_name):
        named_slots = self._slot_dict(slot_name)
        if var not in named_slots:
            named_slots[var] = slot_creator.create_zeros_slot(var, op_name)
        return named_slots[var]

    # TODO: in the native RMSProp code, memcpy() (for CPU) and
    # cudaMemcpyAsync() (for GPU) are used when updating values,
    # so values may be overwritten by results from other threads.
    # (Need to check how learning performance changes if this is replaced.)
Author: SuReLI, Project: Deep-RL-agents, Lines of code: 12, Source file: rmsprop_applier.py

Example 8: _apply_sparse_shared

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
        beta1_weight, beta2_weight = self._get_beta_weights()

        learning_rate_tensor = math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype)
        beta1_tensor = math_ops.cast(self._beta1_tensor, var.dtype.base_dtype)
        beta2_tensor = math_ops.cast(self._beta2_tensor, var.dtype.base_dtype)
        nu1_tensor = math_ops.cast(self._nu1_tensor, var.dtype.base_dtype)
        nu2_tensor = math_ops.cast(self._nu2_tensor, var.dtype.base_dtype)
        epsilon_tensor = math_ops.cast(self._epsilon_tensor, var.dtype.base_dtype)

        beta1_weight = math_ops.cast(beta1_weight, var.dtype.base_dtype) * beta1_tensor + 1.0
        beta2_weight = math_ops.cast(beta2_weight, var.dtype.base_dtype) * beta2_tensor + 1.0

        beta1_adj = 1.0 - (1.0 / beta1_weight)
        beta2_adj = 1.0 - (1.0 / beta2_weight)

        exp_avg = self.get_slot(var, "exp_avg")
        exp_avg_sq = self.get_slot(var, "exp_avg_sq")

        grad_sq = grad * grad

        exp_avg_tensor = state_ops.assign(exp_avg, beta1_adj * exp_avg, use_locking=self._use_locking)
        with ops.control_dependencies([exp_avg_tensor]):
            exp_avg_tensor = scatter_add(exp_avg, indices, (1.0 - beta1_adj) * grad)

        exp_avg_sq_tensor = state_ops.assign(exp_avg_sq, beta2_adj * exp_avg_sq, use_locking=self._use_locking)
        with ops.control_dependencies([exp_avg_sq_tensor]):
            exp_avg_sq_tensor = scatter_add(exp_avg_sq, indices, (1.0 - beta2_adj) * grad_sq)

        avg_grad = slot_creator.create_zeros_slot(var, self._name)
        avg_grad_tensor = state_ops.assign(avg_grad, nu1_tensor * exp_avg_tensor, use_locking=self._use_locking)
        with ops.control_dependencies([avg_grad_tensor]):
            avg_grad_tensor = scatter_add(avg_grad, indices, (1.0 - nu1_tensor) * grad)

        avg_grad_sq = slot_creator.create_zeros_slot(var, self._name)
        avg_grad_sq_tensor = state_ops.assign(
            avg_grad_sq, nu2_tensor * exp_avg_sq_tensor, use_locking=self._use_locking
        )
        with ops.control_dependencies([avg_grad_sq_tensor]):
            avg_grad_sq_tensor = scatter_add(avg_grad_sq, indices, (1.0 - nu2_tensor) * grad_sq)

        avg_grad_rms_tensor = math_ops.sqrt(avg_grad_sq_tensor)

        var_update = state_ops.assign_add(
            var,
            -learning_rate_tensor * avg_grad_tensor / (avg_grad_rms_tensor + epsilon_tensor),
            use_locking=self._use_locking,
        )

        return control_flow_ops.group(*[var_update, exp_avg_tensor, exp_avg_sq_tensor]) 
Author: facebookresearch, Project: qhoptim, Lines of code: 52, Source file: qhadam.py
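For orientation, the snippet above is the sparse-update path of the QHAdam (quasi-hyperbolic Adam) optimizer from the qhoptim project. A hedged NumPy sketch of the corresponding dense update follows; the names mirror the snippet, but the bias-correction terms carried by beta1_weight/beta2_weight are omitted for brevity, and this function is not part of the qhoptim API:

import numpy as np

def qhadam_dense_step(var, grad, exp_avg, exp_avg_sq,
                      lr, beta1, beta2, nu1, nu2, eps):
    # Exponential moving averages of the gradient and the squared gradient.
    exp_avg = beta1 * exp_avg + (1.0 - beta1) * grad
    exp_avg_sq = beta2 * exp_avg_sq + (1.0 - beta2) * grad * grad
    # Quasi-hyperbolic interpolation between the raw gradient and the averages.
    avg_grad = nu1 * exp_avg + (1.0 - nu1) * grad
    avg_grad_sq = nu2 * exp_avg_sq + (1.0 - nu2) * grad * grad
    var = var - lr * avg_grad / (np.sqrt(avg_grad_sq) + eps)
    return var, exp_avg, exp_avg_sq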

Example 9: apply

# Required import: from tensorflow.python.training import slot_creator [as alias]
# Or equivalently: from tensorflow.python.training.slot_creator import create_zeros_slot [as alias]
def apply(self, var_list=None):

        if var_list is None:
            var_list = variables.trainable_variables()

        for var in var_list:
            if var.dtype.base_dtype not in [dtypes.float16, dtypes.float32,
                                            dtypes.float64]:
                raise TypeError("The variables must be half, float, or double: %s" %
                                var.name)

            if var not in self._averages:
                # For variables: to lower communication bandwidth across devices we keep
                # the moving averages on the same device as the variables. For other
                # tensors, we rely on the existing device allocation mechanism.
                with ops.init_scope():
                    if isinstance(var, variables.Variable):
                        avg = slot_creator.create_slot(var,
                                                       var.initialized_value(),
                                                       self.name,
                                                       colocate_with_primary=True)
                        # NOTE(mrry): We only add `tf.Variable` objects to the
                        # `MOVING_AVERAGE_VARIABLES` collection.
                        ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
                    else:
                        avg = slot_creator.create_zeros_slot(
                            var,
                            self.name,
                            colocate_with_primary=(var.op.type in ["Variable",
                                                                   "VariableV2",
                                                                   "VarHandleOp"]))
                self._averages[var] = avg

        with ops.device('/cpu:0'):
            self._n_models = variable_scope.get_variable(shape=[],
                                                         dtype=dtypes.float32,
                                                         name='n_models',
                                                         initializer=init_ops.constant_initializer(0.),
                                                         trainable=False)

        with ops.name_scope(self.name) as scope:
            updates = []
            for var in var_list:
                updates.append(assign_stochastic_average(self._averages[var], var, self._n_models))
            with ops.control_dependencies(updates):
                update_n_models = state_ops.assign_add(self._n_models, 1., name=scope)
            return update_n_models 
Author: JGuillaumin, Project: swa-tf, Lines of code: 49, Source file: stochastic_weight_averaging.py
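The snippet calls a helper, assign_stochastic_average, that is defined elsewhere in the project and not shown in this excerpt. Assuming it implements the usual stochastic-weight-averaging running mean over the models averaged so far (an assumption about that helper, not confirmed by the excerpt), a minimal sketch would be:

from tensorflow.python.ops import state_ops

def assign_stochastic_average(avg, var, n_models):
    # Assumed behavior: incremental running mean,
    #   avg <- avg + (var - avg) / (n_models + 1)
    return state_ops.assign_add(avg, (var - avg) / (n_models + 1.0))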


Note: The tensorflow.python.training.slot_creator.create_zeros_slot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For distribution and use, please follow the corresponding project's License; do not reproduce without permission.