

Python tensorflow.sign Method Code Examples

This article collects typical usage examples of the tensorflow.sign method in Python. If you are wondering how tensorflow.sign is used in practice, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module the method belongs to.


The following presents 15 code examples of the tensorflow.sign method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
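
Before the examples, here is a minimal sketch of what tf.sign itself computes (not taken from any of the projects below; it assumes TensorFlow 2.x eager execution): the op maps each element to -1, 0, or +1.

import tensorflow as tf

x = tf.constant([-2.5, 0.0, 3.0])
print(tf.sign(x).numpy())  # [-1.  0.  1.]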

Example 1: _quantize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
Developer: akzaidi, Project: fine-lm, Lines of code: 18, Source file: diet.py
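
The quantizer above rounds the magnitude only and multiplies the sign back in afterwards. A minimal sketch of that sign/magnitude decomposition with illustrative values (not part of the original project; again assuming eager execution):

x = tf.constant([-3.2, 0.0, 0.5])
sign_x, abs_x = tf.sign(x), tf.abs(x)
print((sign_x * abs_x).numpy())  # reconstructs x, including the zero entry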

Example 2: _to_bfloat16_unbiased

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def _to_bfloat16_unbiased(x, noise):
  """Convert a float32 to a bfloat16 using randomized roundoff.

  Args:
    x: A float32 Tensor.
    noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x)
  Returns:
    A float32 Tensor.
  """
  x_sign = tf.sign(x)
  # Make sure x is positive.  If it is zero, the two candidates are identical.
  x = x * x_sign + 1e-30
  cand1 = tf.to_bfloat16(x)
  cand1_f = tf.to_float(cand1)
  # This relies on the fact that for a positive bfloat16 b,
  # b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the
  # next lower one. Both 1.005 and 0.995 are ballpark estimates.
  cand2 = tf.to_bfloat16(
      tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995))
  ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
  return ret * tf.to_bfloat16(x_sign) 
Developer: akzaidi, Project: fine-lm, Lines of code: 23, Source file: quantization.py

Example 3: _apply_sparse

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
        beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)

        eps = 1e-7  # cap for moving average

        m = self.get_slot(var, "m")
        m_slice = tf.gather(m, grad.indices)
        m_t = state_ops.scatter_update(m, grad.indices,
                                       tf.maximum(beta_t * m_slice + eps, tf.abs(grad.values)))
        m_t_slice = tf.gather(m_t, grad.indices)

        var_update = state_ops.scatter_sub(var, grad.indices, lr_t * grad.values * tf.exp(
            tf.log(alpha_t) * tf.sign(grad.values) * tf.sign(m_t_slice)))  # Update 'ref' by subtracting 'value'.
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in its inputs have finished.
        return control_flow_ops.group(*[var_update, m_t]) 
Developer: ChenglongChen, Project: tensorflow-XNN, Lines of code: 20, Source file: optimizer.py
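
The exp/log expression in var_update is just a power: exp(log(alpha_t) * s) equals alpha_t ** s, where s = tf.sign(grad.values) * tf.sign(m_t_slice) is +1 when the gradient and the tracked maximum agree in sign and -1 otherwise. A minimal numeric check of that identity, with hypothetical values unrelated to the optimizer above:

import math

a, s = 2.0, -1.0
assert abs(math.exp(math.log(a) * s) - a ** s) < 1e-12  # exp(log(a) * s) == a ** s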

Example 4: divide

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def divide(x, y, safe_mode=True, epsilon=None, name=None):
    """ A wrapper of `tf.divide`, computes Python style division of x by y but extends safe divide support.
        If safe_mode is `True` or epsilon is given(a small float number), the absolute value of denominator
        in the division will be clip to make sure it's bigger than epsilon(default is 1e-13).

    Args:
        safe_mode: Use safe divide mode.
        epsilon: Float number. Default is `1e-13`.
    """
    if not safe_mode and epsilon is None:
        return tf.divide(x, y, name=name)
    else:
        epsilon = 1e-20 if epsilon is None else epsilon
        name = "safe_divide" if name is None else name
        with tf.name_scope(name):
            y = tf.where(tf.greater(tf.abs(y), epsilon), y, y + tf.sign(y) * epsilon)
            return tf.divide(x, y) 
Developer: naturomics, Project: CapsLayer, Lines of code: 19, Source file: math_ops.py
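
A minimal usage sketch of the wrapper (hypothetical values): the second denominator is smaller than the default epsilon of 1e-20, so it is nudged up to roughly epsilon before dividing, which bounds the result at about |x| / epsilon. Note that an exactly-zero denominator is left unchanged because tf.sign(0.) is 0.

x = tf.constant([1.0, 2.0])
y = tf.constant([4.0, 1e-30])
print(divide(x, y).numpy())  # ~[0.25, 2e20] instead of ~[0.25, 2e30]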

Example 5: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def call(self, inputs):
    """In layers should be of shape dtype tf.float32, (None, self.max_atoms, 4)"""
    atom_numbers = tf.cast(inputs[:, :, 0], tf.int32)
    flags = tf.sign(atom_numbers)
    flags = tf.cast(
        tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2), tf.float32)
    coordinates = inputs[:, :, 1:]
    if self.coordinates_in_bohr:
      coordinates = coordinates * 0.52917721092

    d = self.distance_matrix(coordinates, flags)

    d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
    d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)

    radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
    angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
                                        coordinates)
    return tf.concat(
        [
            tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
            angular_sym
        ],
        axis=2) 
Developer: deepchem, Project: deepchem, Lines of code: 26, Source file: layers.py

Example 6: _sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples 
Developer: thu-ml, Project: zhusuan, Lines of code: 22, Source file: univariate.py
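
The sampling line is the standard inverse-CDF transform for the Laplace distribution: for u drawn uniformly from (-1, 1), loc - scale * sign(u) * log(1 - |u|) follows Laplace(loc, scale). tf.log1p(-tf.abs(u)) is the numerically stable form of log(1 - |u|), and starting the uniform range just above -1 via np.nextafter keeps log(0) out of reach.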

Example 7: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def __call__(
            self,
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
    ):
        embedded, embedding_size = self.embed(
            input_sequence,
            regularizer,
            dropout_rate,
            is_training=True
        )

        if self.mask:
            mask_matrix = tf.cast(
                tf.expand_dims(tf.sign(tf.abs(input_sequence)), -1),
                dtype=tf.float32
            )
            embedded = tf.multiply(embedded, mask_matrix)

        return embedded, embedding_size 
Developer: uber, Project: ludwig, Lines of code: 24, Source file: embedding_modules.py
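
The sign-of-abs expression is a common padding-mask trick: non-zero token IDs map to 1 and padding zeros stay 0, and the extra axis lets the mask broadcast over the embedding dimension. A minimal sketch with hypothetical token IDs (assuming 0 is the padding ID):

input_sequence = tf.constant([[4, 17, 0, 0]])
mask = tf.sign(tf.abs(input_sequence))                       # [[1, 1, 0, 0]]
mask_matrix = tf.cast(tf.expand_dims(mask, -1), tf.float32)  # shape (1, 4, 1)
# tf.multiply(embedded, mask_matrix) then zeroes out the embeddings at padded positions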

Example 8: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def __init__(self, n_features, lenscale=None, p=1, variational=False,
                 learn_lenscale=False):
        """Create an instance of an arc cosine kernel layer."""
        # Setup random weights
        if variational:
            kern = RBFVariational(lenscale=lenscale,
                                  learn_lenscale=learn_lenscale)
        else:
            kern = RBF(lenscale=lenscale, learn_lenscale=learn_lenscale)
        super().__init__(n_features=n_features, kernel=kern)

        # Kernel order
        assert isinstance(p, int) and p >= 0
        if p == 0:
            self.pfunc = tf.sign
        elif p == 1:
            self.pfunc = lambda x: x
        else:
            self.pfunc = lambda x: tf.pow(x, p) 
Developer: gradientinstitute, Project: aboleth, Lines of code: 21, Source file: layers.py

Example 9: _last_relevant

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def _last_relevant(outputs, sequence_length):
        """Deprecated"""
        batch_size = tf.shape(outputs)[0]
        max_length = outputs.get_shape()[1]
        output_size = outputs.get_shape()[2]
        index = tf.range(0, batch_size) * max_length + (sequence_length - 1)
        flat = tf.reshape(outputs, [-1, output_size])
        last_timesteps = tf.gather(flat, index)  # very slow
        # mask = tf.sign(index)
        # last_timesteps = tf.boolean_mask(flat, mask)
        # # Creating a vector of 0s and 1s that will specify what timesteps to choose.
        # partitions = tf.reduce_sum(tf.one_hot(index, tf.shape(flat)[0], dtype='int32'), 0)
        # # Selecting the elements we want to choose.
        # _, last_timesteps = tf.dynamic_partition(flat, partitions, 2)  # (batch_size, n_dim)
        # https://stackoverflow.com/questions/35892412/tensorflow-dense-gradient-explanation
        return last_timesteps 
Developer: Lapis-Hong, Project: atec-nlp, Lines of code: 18, Source file: encoder.py

Example 10: random_signs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def random_signs(num_elements, seed, dtype=tf.float32):
  """Returns a Tensor of `num_elements` random +1/-1 values as `dtype`.

  If run twice with the same seeds, it will produce the same pseudorandom
  numbers. The output is consistent across multiple runs on the same hardware
  (and between CPU and GPU), but may change between versions of TensorFlow or
  on non-CPU/GPU hardware.

  If consistency is required, use `random_signs_cmwc` instead.

  Args:
    num_elements: A Python integer. The number of random values to be generated.
    seed: A shape [2] integer Tensor of seeds to the random number generator.
    dtype: The type of the output.

  Returns:
    A Tensor of `num_elements` random +1/-1 values as `dtype`.
  """
  return tf.cast(
      tf.sign(tf.random.stateless_uniform([num_elements], seed) - 0.5), dtype) 
Developer: tensorflow, Project: model-optimization, Lines of code: 22, Source file: tf_utils.py
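
A minimal usage sketch (assuming TensorFlow 2.x; the seed must be a shape-[2] integer tensor):

seed = tf.constant([7, 42], dtype=tf.int64)
print(random_signs(4, seed).numpy())  # four float32 values, each -1.0 or +1.0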

Example 11: concordance_index2

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def concordance_index2(y_true, y_pred):
  total_pairs = 0
  sum_score = 0.0
  for i in range(len(y_true) - 1):
    y_true_1 = y_true[i]
    y_pred_1 = y_pred[i]
    for j in range(i+1, len(y_true)):
      y_true_2 = y_true[j]      
      if y_true_1 == y_true_2:
        continue
      y_pred_2 = y_pred[j]
      total_pairs += 1
      if y_pred_1 == y_pred_2:
        sum_score += 0.5
        continue
      concord = np.sign(y_true_1 - y_true_2) == np.sign(y_pred_1 - y_pred_2)
      sum_score += concord * 1.0
  return sum_score/total_pairs 
Developer: simonfqy, Project: PADME, Lines of code: 20, Source file: toy.py
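
A small worked check with hypothetical values: of the three comparable pairs below, two are ordered the same way by y_true and y_pred, so the index is 2/3.

y_true = [1.0, 2.0, 3.0]
y_pred = [0.2, 0.1, 0.9]
print(concordance_index2(y_true, y_pred))  # 0.666..., 2 of the 3 pairs are concordant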

Example 12: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def build(self):
    """ tensorflow computation graph for transform """
    graph = tf.Graph()
    with graph.as_default():
      self.inputs = tf.placeholder(tf.float32, shape=(None, self.max_atoms, 4))
      atom_numbers = tf.cast(self.inputs[:, :, 0], tf.int32)
      flags = tf.sign(atom_numbers)
      flags = tf.to_float(tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2))
      coordinates = self.inputs[:, :, 1:]
      if self.coordinates_in_bohr:
        coordinates = coordinates * 0.52917721092
      d = self.distance_matrix(coordinates, flags)
      d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
      d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)
      radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
      angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
                                          coordinates)
      self.outputs = tf.concat(
          [
              tf.to_float(tf.expand_dims(atom_numbers, 2)), radial_sym,
              angular_sym
          ],
          axis=2)
    return graph 
Developer: simonfqy, Project: PADME, Lines of code: 26, Source file: transformers.py

Example 13: update_state

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def update_state(self, values, sample_weight=None):
        values = tf.cast(values, self.values_dtype)

        if not self.built:
            with tf.name_scope(self.name), tf.init_scope():
                self.build(values.shape)

        unchanged_values = tf.math.count_nonzero(
            tf.equal(self._previous_values, values)
        )
        flip_ratio = 1 - (
            tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
        )

        update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
        with tf.control_dependencies([update_total_op]):
            update_count_op = self.count.assign_add(1)
            with tf.control_dependencies([update_count_op]):
                return self._previous_values.assign(values) 
Developer: larq, Project: larq, Lines of code: 21, Source file: metrics.py
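
Here tf.sign(self.count) acts as a 0/1 gate: on the first update, count is presumably still 0, so the flip ratio computed against the freshly initialized _previous_values is multiplied by 0 and not accumulated into total; on every later update sign(count) is 1 and the flip ratio is added normally.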

Example 14: sign

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def sign(x):
    r"""A sign function that will never be zero
    \\[
    f(x) = \begin{cases}
      -1 & x < 0 \\\
      \hphantom{-}1 & x \geq 0
    \end{cases}
    \\]

    This function is similar to
    [`tf.math.sign`](https://www.tensorflow.org/api_docs/python/tf/math/sign) but will
    return a binary value and will never be zero.

    # Arguments
        `x`: Input Tensor

    # Returns
        A Tensor with same type as `x`.
    """
    return tf.sign(tf.sign(x) + 0.1) 
Developer: larq, Project: larq, Lines of code: 22, Source file: math.py
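
A quick hand-check of why this never returns 0: for x < 0, tf.sign(x) is -1 and sign(-1 + 0.1) is -1; for x == 0, tf.sign(x) is 0 and sign(0 + 0.1) is +1; for x > 0, tf.sign(x) is +1 and sign(1 + 0.1) is +1.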

Example 15: heaviside

# Required import: import tensorflow [as alias]
# Or: from tensorflow import sign [as alias]
def heaviside(x):
    r"""Heaviside step function with output values 0 and 1.

    \\[
    q(x) = \begin{cases}
    +1 & x > 0 \\\
    \hphantom{+}0 & x \leq 0
    \end{cases}
    \\]

    # Arguments
        `x`: Input Tensor

    # Returns
        A Tensor with same type as `x`.
    """
    return tf.sign(tf.nn.relu(x)) 
Developer: larq, Project: larq, Lines of code: 19, Source file: math.py
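
Here tf.nn.relu clamps every non-positive input to 0, so the outer tf.sign returns 1 for x > 0 and 0 for x <= 0, which matches the step function in the docstring.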


Note: The tensorflow.sign examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.