Python math_ops.cos Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.cos. If you have been wondering what cos does, how to call it, or what real code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the cos function, sorted by popularity.
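
Before the individual examples, here is a minimal usage sketch (assuming TensorFlow 2.x with eager execution; ordinary user code would reach the same op through the public alias `tf.math.cos`):

```python
# Minimal sketch: math_ops.cos computes the elementwise cosine of a tensor,
# with angles in radians. It is the internal op behind the public tf.math.cos.
import math

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([0.0, math.pi / 2, math.pi])
print(math_ops.cos(x))  # approximately [1.0, 0.0, -1.0]
```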

Example 1: decayed_lr

    def decayed_lr():
      """Helper to recompute learning rate; most helpful in eager-mode."""
      global_step_recomp = math_ops.cast(global_step, dtype)
      completed_fraction = global_step_recomp / first_decay_steps

      def compute_step(completed_fraction, geometric=False):
        """Helper for `cond` operation."""
        if geometric:
          i_restart = math_ops.floor(
              math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
              math_ops.log(t_mul))

          sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
          completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart

        else:
          i_restart = math_ops.floor(completed_fraction)
          completed_fraction -= i_restart

        return i_restart, completed_fraction

      i_restart, completed_fraction = control_flow_ops.cond(
          math_ops.equal(t_mul, 1.0),
          lambda: compute_step(completed_fraction, geometric=False),
          lambda: compute_step(completed_fraction, geometric=True))

      m_fac = m_mul**i_restart
      cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
          constant_op.constant(math.pi) * completed_fraction))
      decayed = (1 - alpha) * cosine_decayed + alpha

      return math_ops.multiply(learning_rate, decayed, name=name)
Author: ZhangXinNan | Project: tensorflow | Lines: 32 | Source: learning_rate_decay.py
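
This helper implements cosine decay with warm restarts (SGDR); the `geometric` branch inverts the geometric series of period lengths to find which restart period the current step falls in. Below is a standalone plain-Python sketch of the same arithmetic, with hypothetical hyperparameters (`first_decay_steps=100`, `t_mul=2.0`, `m_mul=0.9`, `alpha=0.0`) chosen only for illustration:

```python
# Plain-Python sketch of the restart arithmetic in the snippet above.
import math

def sgdr_lr(step, lr=0.1, first_decay_steps=100, t_mul=2.0, m_mul=0.9,
            alpha=0.0):
    completed = step / first_decay_steps
    if t_mul == 1.0:
        # All restart periods have the same length.
        i_restart = math.floor(completed)
        frac = completed - i_restart
    else:
        # Each period is t_mul times longer than the previous one; invert the
        # geometric series to find the current period index.
        i_restart = math.floor(
            math.log(1.0 - completed * (1.0 - t_mul)) / math.log(t_mul))
        sum_r = (1.0 - t_mul ** i_restart) / (1.0 - t_mul)
        frac = (completed - sum_r) / t_mul ** i_restart
    cosine_decayed = 0.5 * m_mul ** i_restart * (1.0 + math.cos(math.pi * frac))
    return lr * ((1.0 - alpha) * cosine_decayed + alpha)

# The rate decays toward 0 by step 100, then restarts at m_mul * lr.
print(sgdr_lr(0), sgdr_lr(99), sgdr_lr(100))
```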

Example 2: _SinGrad

def _SinGrad(op, grad):
  """Returns grad * cos(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    if x.dtype.is_complex:
      x = math_ops.conj(x)
    return grad * math_ops.cos(x)
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py
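
The identity this gradient encodes, d/dx sin(x) = cos(x), is easy to sanity-check with a central finite difference; a minimal numpy sketch:

```python
# Numerically verify d/dx sin(x) = cos(x) via a central difference.
import numpy as np

x = np.linspace(-3.0, 3.0, 7)
h = 1e-6
numerical = (np.sin(x + h) - np.sin(x - h)) / (2 * h)
assert np.allclose(numerical, np.cos(x), atol=1e-8)
```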

Example 3: _TanGrad

def _TanGrad(op, grad):
  """Returns grad * 1/sec^2(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    secx = math_ops.inv(math_ops.cos(x))
    secx2 = math_ops.square(secx)
    return grad * secx2
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py
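
The same style of check works for the tangent gradient, d/dx tan(x) = sec^2(x) = 1/cos(x)^2, which is exactly what the snippet builds via `math_ops.inv` and `math_ops.square`:

```python
# Numerically verify d/dx tan(x) = 1/cos(x)^2 via a central difference.
import numpy as np

x = np.linspace(-1.0, 1.0, 9)  # stay away from the poles at +/- pi/2
h = 1e-6
numerical = (np.tan(x + h) - np.tan(x - h)) / (2 * h)
assert np.allclose(numerical, 1.0 / np.cos(x) ** 2, atol=1e-6)
```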

Example 4: decayed_lr

  def decayed_lr(learning_rate, global_step, decay_steps, initial_variance,
                 variance_decay, num_periods, alpha, beta, name):
    """Helper to recompute learning rate; most helpful in eager-mode."""
    with ops.name_scope(name, "NoisyLinearCosineDecay",
                        [learning_rate, global_step]) as name:
      learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
      dtype = learning_rate.dtype
      decay_steps = math_ops.cast(decay_steps, dtype)
      initial_variance = math_ops.cast(initial_variance, dtype)
      variance_decay = math_ops.cast(variance_decay, dtype)
      num_periods = math_ops.cast(num_periods, dtype)
      alpha = math_ops.cast(alpha, dtype)
      beta = math_ops.cast(beta, dtype)

      global_step_recomp = math_ops.cast(global_step, dtype)
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
      linear_decayed = (decay_steps - global_step_recomp) / decay_steps
      variance = initial_variance / (
          math_ops.pow(1.0 + global_step_recomp, variance_decay))
      std = math_ops.sqrt(variance)
      noisy_linear_decayed = (
          linear_decayed + random_ops.random_normal(
              linear_decayed.shape, stddev=std))

      completed_fraction = global_step_recomp / decay_steps
      fraction = 2.0 * num_periods * completed_fraction
      cosine_decayed = 0.5 * (
          1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
      noisy_linear_cosine_decayed = (
          (alpha + noisy_linear_decayed) * cosine_decayed + beta)

      return math_ops.multiply(
          learning_rate, noisy_linear_cosine_decayed, name=name)
Author: AnishShah | Project: tensorflow | Lines: 33 | Source: learning_rate_decay_v2.py
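
Stripped of the TensorFlow plumbing, this schedule multiplies a noisy linear ramp by a cosine envelope and adds a floor. A plain-Python sketch, taking what the TF API docs list as the default hyperparameters (`initial_variance=1.0`, `variance_decay=0.55`, `num_periods=0.5`, `alpha=0.0`, `beta=0.001`) as assumptions:

```python
# Sketch of the noisy linear cosine schedule above, outside TensorFlow.
import math
import numpy as np

rng = np.random.default_rng(0)

def noisy_linear_cosine(step, lr=0.1, decay_steps=1000, initial_variance=1.0,
                        variance_decay=0.55, num_periods=0.5, alpha=0.0,
                        beta=0.001):
    step = min(step, decay_steps)
    linear = (decay_steps - step) / decay_steps
    std = math.sqrt(initial_variance / (1.0 + step) ** variance_decay)
    noisy_linear = linear + rng.normal(scale=std)  # Gaussian noise on the ramp
    fraction = 2.0 * num_periods * step / decay_steps
    cosine = 0.5 * (1.0 + math.cos(math.pi * fraction))
    return lr * ((alpha + noisy_linear) * cosine + beta)

print([round(noisy_linear_cosine(s), 4) for s in (0, 500, 1000)])
```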

Example 5: __call__

  def __call__(self, step):
    with ops.name_scope(self.name, "NoisyLinearCosineDecay",
                        [self.initial_learning_rate, step]) as name:
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = math_ops.cast(self.decay_steps, dtype)
      initial_variance = math_ops.cast(self.initial_variance, dtype)
      variance_decay = math_ops.cast(self.variance_decay, dtype)
      num_periods = math_ops.cast(self.num_periods, dtype)
      alpha = math_ops.cast(self.alpha, dtype)
      beta = math_ops.cast(self.beta, dtype)

      global_step_recomp = math_ops.cast(step, dtype)
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
      linear_decayed = (decay_steps - global_step_recomp) / decay_steps
      variance = initial_variance / (
          math_ops.pow(1.0 + global_step_recomp, variance_decay))
      std = math_ops.sqrt(variance)
      noisy_linear_decayed = (
          linear_decayed + random_ops.random_normal(
              linear_decayed.shape, stddev=std))

      completed_fraction = global_step_recomp / decay_steps
      fraction = 2.0 * num_periods * completed_fraction
      cosine_decayed = 0.5 * (
          1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
      noisy_linear_cosine_decayed = (
          (alpha + noisy_linear_decayed) * cosine_decayed + beta)

      return math_ops.multiply(
          initial_learning_rate, noisy_linear_cosine_decayed, name=name)
Author: terrytangyuan | Project: tensorflow | Lines: 32 | Source: learning_rate_schedule.py

Example 6: _raised_cosine_window

def _raised_cosine_window(name, default_name, window_length, periodic,
                          dtype, a, b):
  """Helper function for computing a raised cosine window.

  Args:
    name: Name to use for the scope.
    default_name: Default name to use for the scope.
    window_length: A scalar `Tensor` or integer indicating the window length.
    periodic: A bool `Tensor` indicating whether to generate a periodic or
      symmetric window.
    dtype: A floating point `DType`.
    a: The alpha parameter to the raised cosine window.
    b: The beta parameter to the raised cosine window.

  Returns:
    A `Tensor` of shape `[window_length]` of type `dtype`.

  Raises:
    ValueError: If `dtype` is not a floating point type or `window_length` is
      not scalar or `periodic` is not scalar.
  """
  if not dtype.is_floating:
    raise ValueError('dtype must be a floating point type. Found %s' % dtype)

  with ops.name_scope(name, default_name, [window_length, periodic]):
    window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32,
                                          name='window_length')
    window_length.shape.assert_has_rank(0)
    window_length_const = tensor_util.constant_value(window_length)
    if window_length_const == 1:
      return array_ops.ones([1], dtype=dtype)
    periodic = math_ops.cast(
        ops.convert_to_tensor(periodic, dtype=dtypes.bool, name='periodic'),
        dtypes.int32)
    periodic.shape.assert_has_rank(0)
    even = 1 - math_ops.mod(window_length, 2)

    n = math_ops.cast(window_length + periodic * even - 1, dtype=dtype)
    count = math_ops.cast(math_ops.range(window_length), dtype)
    cos_arg = constant_op.constant(2 * np.pi, dtype=dtype) * count / n

    if window_length_const is not None:
      return math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype)
    return control_flow_ops.cond(
        math_ops.equal(window_length, 1),
        lambda: array_ops.ones([1], dtype=dtype),
        lambda: math_ops.cast(a - b * math_ops.cos(cos_arg), dtype=dtype))
Author: AbhinavJain13 | Project: tensorflow | Lines: 47 | Source: window_ops.py
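
The familiar Hann and Hamming windows are the two standard instances of this raised cosine: `a = b = 0.5` gives Hann, and `a = 0.54, b = 0.46` gives Hamming. A numpy sketch of the symmetric (`periodic=False`) case, checked against numpy's built-ins:

```python
# Symmetric raised cosine window: w[n] = a - b * cos(2*pi*n / (N - 1)).
import numpy as np

def raised_cosine(window_length, a, b):
    n = np.arange(window_length)
    return a - b * np.cos(2.0 * np.pi * n / (window_length - 1))

assert np.allclose(raised_cosine(64, 0.5, 0.5), np.hanning(64))    # Hann
assert np.allclose(raised_cosine(64, 0.54, 0.46), np.hamming(64))  # Hamming
```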

Example 7: cosine_decay

def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0,
                 name=None):
  """Applies cosine decay to the learning rate.

  See [Loshchilov & Hutter, ICLR 2017], SGDR: Stochastic Gradient Descent
  with Warm Restarts. https://arxiv.org/abs/1608.03983

  When training a model, it is often recommended to lower the learning rate as
  the training progresses.  This function applies a cosine decay function
  to a provided initial learning rate.  It requires a `global_step` value to
  compute the decayed learning rate.  You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate.  It is computed as:
  ```python
  global_step = min(global_step, decay_steps)
  cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
  decayed = (1 - alpha) * cosine_decay + alpha
  decayed_learning_rate = learning_rate * decayed
  ```

  Example usage:
  ```python
  decay_steps = 1000
  lr_decayed = cosine_decay(learning_rate, global_step, decay_steps)
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
      The initial learning rate.
    global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
      Global step to use for the decay computation.
    decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
      Number of steps to decay over.
    alpha: A scalar `float32` or `float64` Tensor or a Python number.
      Minimum learning rate value as a fraction of learning_rate.
    name: String. Optional name of the operation.  Defaults to 'CosineDecay'.
  Returns:
    A scalar `Tensor` of the same type as `learning_rate`.  The decayed
    learning rate.
  Raises:
    ValueError: if `global_step` is not supplied.
  """
  if global_step is None:
    raise ValueError("cosine decay requires global_step")
  with ops.name_scope(name, "CosineDecay",
                      [learning_rate, global_step]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    global_step = math_ops.minimum(global_step, decay_steps)
    completed_fraction = global_step / decay_steps
    cosine_decayed = 0.5 * (
        1.0 + math_ops.cos(constant_op.constant(math.pi) * completed_fraction))

    decayed = (1 - alpha) * cosine_decayed + alpha
    return math_ops.multiply(learning_rate, decayed)
Author: Lin-jipeng | Project: tensorflow | Lines: 58 | Source: learning_rate_decay.py
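
A worked instance of the docstring formula in plain Python: with `alpha=0`, the rate falls to exactly half its initial value at the midpoint, since cos(pi * 0.5) = 0:

```python
# Evaluate the cosine decay formula at a few steps (hypothetical lr=0.1).
import math

learning_rate, decay_steps, alpha = 0.1, 1000, 0.0
for step in (0, 500, 1000):
    completed_fraction = min(step, decay_steps) / decay_steps
    cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
    print(step, learning_rate * ((1 - alpha) * cosine_decayed + alpha))
# step 0 -> 0.1, step 500 -> 0.05, step 1000 -> ~0.0
```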

Example 8: angles_to_projective_transforms

def angles_to_projective_transforms(angles,
                                    image_height,
                                    image_width,
                                    name=None):
  """Returns projective transform(s) for the given angle(s).

  Args:
    angles: A scalar angle to rotate all images by, or (for batches of images)
        a vector with an angle to rotate each image in the batch. The rank must
        be statically known (i.e., the shape is not `TensorShape(None)`).
    image_height: Height of the image(s) to be transformed.
    image_width: Width of the image(s) to be transformed.
    name: Optional name scope for the returned operation.

  Returns:
    A tensor of shape (num_images, 8). Projective transforms which can be given
      to `tf.contrib.image.transform`.
  """
  with ops.name_scope(name, "angles_to_projective_transforms"):
    angle_or_angles = ops.convert_to_tensor(
        angles, name="angles", dtype=dtypes.float32)
    if len(angle_or_angles.get_shape()) == 0:  # pylint: disable=g-explicit-length-test
      angles = angle_or_angles[None]
    elif len(angle_or_angles.get_shape()) == 1:
      angles = angle_or_angles
    else:
      raise TypeError("Angles should have rank 0 or 1.")
    x_offset = ((image_width - 1) - (math_ops.cos(angles) *
                                     (image_width - 1) - math_ops.sin(angles) *
                                     (image_height - 1))) / 2.0
    y_offset = ((image_height - 1) - (math_ops.sin(angles) *
                                      (image_width - 1) + math_ops.cos(angles) *
                                      (image_height - 1))) / 2.0
    num_angles = array_ops.shape(angles)[0]
    return array_ops.concat(
        values=[
            math_ops.cos(angles)[:, None],
            -math_ops.sin(angles)[:, None],
            x_offset[:, None],
            math_ops.sin(angles)[:, None],
            math_ops.cos(angles)[:, None],
            y_offset[:, None],
            array_ops.zeros((num_angles, 2), dtypes.float32),
        ],
        axis=1)
Author: Eagle732 | Project: tensorflow | Lines: 44 | Source: image_ops.py
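
The eight parameters `[a0, a1, a2, b0, b1, b2, c0, c1]` map an output pixel `(x, y)` to the input pixel `(a0*x + a1*y + a2, b0*x + b1*y + b2)` (with `c0 = c1 = 0` for an affine transform), and the offsets keep the rotation pivoted on the image center. A numpy sketch of the single-angle case:

```python
# Build the same 8-parameter transform for one angle with plain numpy.
import numpy as np

def rotation_transform(angle, image_height, image_width):
    c, s = np.cos(angle), np.sin(angle)
    cx, cy = (image_width - 1) / 2.0, (image_height - 1) / 2.0
    x_offset = cx - (c * cx - s * cy)   # shift so rotation pivots on center
    y_offset = cy - (s * cx + c * cy)
    return np.array([c, -s, x_offset, s, c, y_offset, 0.0, 0.0])

print(rotation_transform(np.pi / 2, image_height=4, image_width=4))
```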

Example 9: map

  def map(self, input_tensor):
    """Maps each row of input_tensor using random Fourier features.

    Args:
      input_tensor: a `Tensor` containing input features. Its shape is
      [batch_size, self._input_dim].

    Returns:
      A `Tensor` of shape [batch_size, self._output_dim] containing RFFM-mapped
      features.

    Raises:
      InvalidShapeError: if the shape of the `input_tensor` is inconsistent with
        expected input dimension.
    """
    input_tensor_shape = input_tensor.get_shape()
    if len(input_tensor_shape) != 2:
      raise dkm.InvalidShapeError(
          'The rank of the input tensor should be 2. Got %d instead.' %
          len(input_tensor_shape))

    features_dim = input_tensor_shape[1]
    if features_dim != self._input_dim:
      raise dkm.InvalidShapeError(
          'Invalid dimension: expected %d input features, got %d instead.' %
          (self._input_dim, features_dim))

    # Add ops that compute (deterministically) omega_matrix and bias based on
    # the provided seed.
    # TODO(sibyl-vie3Poto): Storing the mapper's parameters (omega_matrix and bias) as
    # constants incurs no RPC calls to the parameter server during distributed
    # training. However, if the parameters grow too large (for instance if they
    # don't fit into memory or if they blow up the size of the GraphDef proto),
    # storing them as constants is no longer an option. In this case, we should
    # have a heuristic to choose out of one of the following alternatives:
    # a) store them as variables (in the parameter server)
    # b) store them as worker local variables
    # c) generate the omega matrix on the fly at each step
    np.random.seed(self._seed)
    omega_matrix_shape = [self._input_dim, self._output_dim]
    bias_shape = [self._output_dim]

    omega_matrix = constant_op.constant(
        np.random.normal(
            scale=1.0 / self._stddev, size=omega_matrix_shape),
        dtype=dtypes.float32)
    bias = constant_op.constant(
        np.random.uniform(
            low=0.0, high=2 * np.pi, size=bias_shape),
        dtype=dtypes.float32)

    x_omega_plus_bias = math_ops.add(
        math_ops.matmul(input_tensor, omega_matrix), bias)
    return math.sqrt(2.0 / self._output_dim) * math_ops.cos(x_omega_plus_bias)
Author: Ajaycs99 | Project: tensorflow | Lines: 54 | Source: random_fourier_features.py
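
The point of this mapping is that, with `omega ~ N(0, 1/stddev^2)` and `bias ~ U[0, 2*pi]`, the inner product of two mapped rows approximates the Gaussian (RBF) kernel. A numpy sketch of that approximation under those assumptions:

```python
# Random Fourier features: z(x).z(y) ~= exp(-||x - y||^2 / (2 * stddev^2)).
import numpy as np

rng = np.random.default_rng(42)
input_dim, output_dim, stddev = 4, 2000, 1.0

omega = rng.normal(scale=1.0 / stddev, size=(input_dim, output_dim))
bias = rng.uniform(0.0, 2 * np.pi, size=output_dim)

def rffm(x):
    return np.sqrt(2.0 / output_dim) * np.cos(x @ omega + bias)

x, y = rng.normal(size=input_dim), rng.normal(size=input_dim)
approx = rffm(x) @ rffm(y)
exact = np.exp(-np.sum((x - y) ** 2) / (2 * stddev ** 2))
print(approx, exact)  # should agree to within sampling error
```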

Example 10: Test

 def Test(self):
   np.random.seed(1)
   n = shape_[-1]
   batch_shape = shape_[:-2]
   np_dtype = dtype_.as_numpy_dtype
   a = np.random.uniform(
       low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   if dtype_.is_complex:
     a += 1j * np.random.uniform(
         low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
   a += np.conj(a.T)
   a = np.tile(a, batch_shape + (1, 1))
   # Optimal stepsize for central difference is O(epsilon^{1/3}).
   epsilon = np.finfo(np_dtype).eps
   delta = 0.1 * epsilon**(1.0 / 3.0)
   # tolerance obtained by looking at actual differences using
   # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
   if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
     tol = 1e-2
   else:
     tol = 1e-7
   with self.session(use_gpu=True):
     tf_a = constant_op.constant(a)
     if compute_v_:
       tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
       # (complex) Eigenvectors are only unique up to an arbitrary phase
       # We normalize the vectors such that the first component has phase 0.
       top_rows = tf_v[..., 0:1, :]
       if tf_a.dtype.is_complex:
         angle = -math_ops.angle(top_rows)
         phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
       else:
         phase = math_ops.sign(top_rows)
       tf_v *= phase
       outputs = [tf_e, tf_v]
     else:
       tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
       outputs = [tf_e]
     for b in outputs:
       x_init = np.random.uniform(
           low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       if dtype_.is_complex:
         x_init += 1j * np.random.uniform(
             low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
       x_init += np.conj(x_init.T)
       x_init = np.tile(x_init, batch_shape + (1, 1))
       theoretical, numerical = gradient_checker.compute_gradient(
           tf_a,
           tf_a.get_shape().as_list(),
           b,
           b.get_shape().as_list(),
           x_init_value=x_init,
           delta=delta)
       self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)
Author: bunbutter | Project: tensorflow | Lines: 54 | Source: self_adjoint_eig_op_test.py

Example 11: Compute

 def Compute(x):
   e, v = linalg_ops.self_adjoint_eig(x)
   # (complex) Eigenvectors are only unique up to an arbitrary phase
   # We normalize the vectors such that the first component has phase 0.
   top_rows = v[..., 0:1, :]
   if dtype_.is_complex:
     angle = -math_ops.angle(top_rows)
     phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
   else:
     phase = math_ops.sign(top_rows)
   v *= phase
   return e, v
Author: Wajih-O | Project: tensorflow | Lines: 12 | Source: self_adjoint_eig_op_test.py
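
Both of the last two snippets use the same trick: rotate each eigenvector by e^{-i*theta} (built from `math_ops.cos` and `math_ops.sin`) so that its first component has phase 0, removing the arbitrary per-vector phase before comparing eigenvectors. A small numpy sketch of that normalization:

```python
# Normalize eigenvector phases so the first component of each is real >= 0.
import numpy as np

a = np.array([[2.0, 1.0j], [-1.0j, 2.0]])   # a Hermitian matrix
e, v = np.linalg.eigh(a)                    # eigenvectors in columns
top_rows = v[0:1, :]
phase = np.exp(-1j * np.angle(top_rows))    # cos(-theta) + i*sin(-theta)
v = v * phase
assert np.allclose(v[0, :].imag, 0.0)       # first row now has phase 0
```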

Example 12: cosine_decay_fn

 def cosine_decay_fn(global_step):
   if global_step is None:
     raise ValueError("global_step is required for cosine_decay.")
   global_step = math_ops.minimum(global_step, decay_steps)
   completed_fraction = math_ops.to_float(global_step) / math_ops.to_float(
       decay_steps)
   fraction = 2.0 * num_periods * completed_fraction
   decayed = 0.5 * (
       1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
   if zero_after is not None:
     decayed = array_ops.where(
         math_ops.greater_equal(fraction, 2 * zero_after), 0.0, decayed)
   return decayed
Author: AbhinavJain13 | Project: tensorflow | Lines: 13 | Source: sign_decay.py

Example 13: _add_sinusoids_signal

    def _add_sinusoids_signal(x, time, min_timescale=1.0, max_timescale=1.0e4):
        """Adds a bunch of sinusoids of different frequencies to a Tensor.

        Each channel of the input Tensor is incremented by a sinusoid of a different
        frequency and phase.

        This allows attention to learn to use absolute and relative positions.
        Timing signals should be added to some precursors of both the query and the
        memory inputs to attention.

        The use of relative position is possible because sin(x+y) and cos(x+y) can be
        expressed in terms of y, sin(x) and cos(x).

        In particular, we use a geometric sequence of timescales starting with
        min_timescale and ending with max_timescale.  The number of different
        timescales is equal to channels / 2. For each timescale, we
        generate the two sinusoidal signals sin(timestep/timescale) and
        cos(timestep/timescale).  All of these sinusoids are concatenated in
        the channels dimension.

        Args:
          x: a Tensor with shape [batch, length, channels]
          time: the current timestep, used when `x` has rank 2 (single-step decoding)
          min_timescale: a float
          max_timescale: a float

        Returns:
          a Tensor the same shape as x.
        """
        channels = x.get_shape().as_list()[-1]
        if x.get_shape().ndims == 3:  # [batch_size, timesteps, dim]
            length = array_ops.shape(x)[1]
            position = math_ops.to_float(math_ops.range(length))
        elif x.get_shape().ndims == 2:  # [batch_size, dim]
            length = 1
            position = math_ops.to_float(math_ops.range(time, time + 1))
        else:
            raise ValueError("need a Tensor with rank 2 or 3")
        num_timescales = channels // 2
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) /
            (math_ops.to_float(num_timescales) - 1))
        inv_timescales = min_timescale * math_ops.exp(
            math_ops.to_float(math_ops.range(num_timescales)) * -log_timescale_increment)
        scaled_time = array_ops.expand_dims(position, 1) * array_ops.expand_dims(inv_timescales, 0)
        signal = array_ops.concat([math_ops.sin(scaled_time), math_ops.cos(scaled_time)], axis=1)
        signal = array_ops.pad(signal, [[0, 0], [0, math_ops.mod(channels, 2)]])
        if x.get_shape().ndims == 3:
            signal = array_ops.reshape(signal, [1, length, channels])
        else:
            signal = array_ops.reshape(signal, [1, channels])
        return x + signal
Author: KIngpon | Project: NJUNMT-tf | Lines: 51 | Source: embedding.py
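
For the rank-3 (batched) case, the signal depends only on `length` and `channels`, so it can be sketched directly in numpy; this reproduces the sin/cos concatenation and odd-channel padding above:

```python
# Numpy sketch of the sinusoidal timing signal for a [batch, length, channels] input.
import numpy as np

def sinusoid_signal(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    position = np.arange(length, dtype=np.float64)
    num_timescales = channels // 2
    log_increment = (np.log(max_timescale / min_timescale) /
                     (num_timescales - 1))
    inv_timescales = min_timescale * np.exp(
        np.arange(num_timescales) * -log_increment)
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    return np.pad(signal, [(0, 0), (0, channels % 2)])  # pad if channels is odd

print(sinusoid_signal(length=5, channels=8).shape)  # (5, 8)
```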

Example 14: restart_decay_fn

 def restart_decay_fn(global_step):
   if global_step is None:
     raise ValueError("global_step is required for cosine_decay.")
   global_step = math_ops.minimum(global_step, decay_steps)
   num = math_ops.mod(num_periods * math_ops.to_float(global_step),
                      decay_steps)
   fraction = num / math_ops.to_float(decay_steps)
   decayed = 0.5 * (
       1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
   if zero_after is not None:
     tmp = math_ops.to_float(
         num_periods * global_step) / math_ops.to_float(decay_steps)
     decayed = array_ops.where(
         math_ops.greater_equal(tmp, zero_after), 0.0, decayed)
   return decayed
Author: AbhinavJain13 | Project: tensorflow | Lines: 15 | Source: sign_decay.py

Example 15: __call__

  def __call__(self, step):
    with ops.name_scope_v2(self.name or "CosineDecay"):
      initial_learning_rate = ops.convert_to_tensor(
          self.initial_learning_rate, name="initial_learning_rate")
      dtype = initial_learning_rate.dtype
      decay_steps = math_ops.cast(self.decay_steps, dtype)

      global_step_recomp = math_ops.cast(step, dtype)
      global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
      completed_fraction = global_step_recomp / decay_steps
      cosine_decayed = 0.5 * (1.0 + math_ops.cos(
          constant_op.constant(math.pi) * completed_fraction))

      decayed = (1 - self.alpha) * cosine_decayed + self.alpha
      return math_ops.multiply(initial_learning_rate, decayed)
Author: aritratony | Project: tensorflow | Lines: 15 | Source: learning_rate_schedule.py


Note: The tensorflow.python.ops.math_ops.cos examples in this article were collected by 纯净天空 from open-source projects hosted on platforms such as GitHub. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing the snippets. Please do not republish without permission.