

Python tensorflow.abs Method Code Examples

This article collects typical usage examples of the tensorflow.abs method in Python. If you are wondering what tensorflow.abs does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow module in which the method is defined.


The following presents 15 code examples of the tensorflow.abs method, sorted by popularity by default.
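Before going through the examples, here is a minimal standalone sketch (not taken from any of the projects below, and assuming TensorFlow 2.x eager execution) of what tf.abs itself does: for real tensors it returns the element-wise absolute value, and for complex tensors it returns the element-wise magnitude.

import tensorflow as tf

x = tf.constant([-1.5, 0.0, 2.0])
print(tf.abs(x).numpy())        # [1.5 0. 2.]  element-wise absolute value

z = tf.constant([3.0 + 4.0j], dtype=tf.complex64)
print(tf.abs(z).numpy())        # [5.]  magnitude sqrt(re^2 + im^2) of a complex tensor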

Example 1: generate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def generate(self, x, **kwargs):
        """
        Generates the adversarial sample for the given input.
        :param x: The model's inputs.
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics NumPy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tf variable with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param y_target: (optional) A tf variable with the labels to target.
                            Leave y_target=None if y is also set.
                            Labels should be one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)
        labels, nb_classes = self.get_or_guess_labels(x, kwargs)
        return self.fgm(x, labels=labels, targeted=(self.y_target is not None)) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 25, Source file: attacks_tfe.py

Example 2: _build_stft_feature

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def _build_stft_feature(self):
        """ Compute STFT of waveform and slice the STFT in segment
         with the right length to feed the network.
        """

        stft_name = self.stft_name
        spec_name = self.spectrogram_name

        if stft_name not in self._features:
            stft_feature = tf.transpose(
                stft(
                    tf.transpose(self._features['waveform']),
                    self._frame_length,
                    self._frame_step,
                    window_fn=lambda frame_length, dtype: (
                        hann_window(frame_length, periodic=True, dtype=dtype)),
                    pad_end=True),
                perm=[1, 2, 0])
            self._features[f'{self._mix_name}_stft'] = stft_feature
        if spec_name not in self._features:
            self._features[spec_name] = tf.abs(
                pad_and_partition(self._features[stft_name], self._T))[:, :, :self._F, :] 
Developer: deezer, Project: spleeter, Lines of code: 24, Source file: __init__.py
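The key role of tf.abs in this example is converting the complex-valued STFT into a magnitude spectrogram. Below is a minimal standalone sketch of that pattern (assuming TensorFlow 2.x; the shapes and frame parameters are illustrative placeholders, not Spleeter's actual configuration):

import tensorflow as tf

waveform = tf.random.normal([2, 44100])  # hypothetical (channels, samples) input
stft = tf.signal.stft(waveform, frame_length=4096, frame_step=1024, pad_end=True)
magnitude = tf.abs(stft)                 # complex64 STFT -> float32 magnitude spectrogram
print(stft.dtype, magnitude.dtype)       # complex64 float32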

Example 3: _compute_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: A float tensor of shape [batch_size, num_anchors].

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
      return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm) 
Developer: ringringyi, Project: DOTA_models, Lines of code: 24, Source file: losses.py
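As a quick sanity check of the branch logic above (this snippet is illustrative and not part of losses.py): differences smaller than 1 go through the quadratic branch 0.5 * x**2, larger ones through the linear branch |x| - 0.5.

import tensorflow as tf

diff = tf.constant([0.2, 0.5, 2.0])
abs_diff = tf.abs(diff)
smooth_l1 = tf.where(abs_diff < 1.0, 0.5 * tf.square(abs_diff), abs_diff - 0.5)
print(smooth_l1.numpy())  # expected: [0.02 0.125 1.5]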

Example 4: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def __init__(self, pad_mask):
    """Compute and store the location of the padding.

    Args:
      pad_mask (tf.Tensor): Reference padding tensor of shape
        [batch_size,length] or [dim_origin] (dim_origin=batch_size*length)
        containing non-zero positive values to indicate padding locations.
    """
    self.nonpad_ids = None
    self.dim_origin = None

    with tf.name_scope("pad_reduce/get_ids"):
      pad_mask = tf.reshape(pad_mask, [-1])  # Flatten the batch
      # nonpad_ids contains the coordinates of zero rows (as pad_mask is
      # float32, checking zero equality is done with |x| < epsilon, with
      # epsilon=1e-9 as standard, here pad_mask only contains positive values
      # so tf.abs would be redundant)
      self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))
      self.dim_origin = tf.shape(pad_mask)[:1] 
Developer: akzaidi, Project: fine-lm, Lines of code: 21, Source file: expert_utils.py
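A small standalone illustration of the index extraction above (not from expert_utils.py): tf.where on the near-zero test returns the coordinates of the non-padding positions, confirming that tf.abs is unnecessary when the mask is non-negative.

import tensorflow as tf

pad_mask = tf.constant([0.0, 1.0, 0.0, 1.0])   # positive values mark padding, zeros mark real tokens
nonpad_ids = tf.where(pad_mask < 1e-9)          # coordinates of the zero entries
print(nonpad_ids.numpy())                       # [[0] [2]]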

Example 5: _quantize

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x

  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q 
Developer: akzaidi, Project: fine-lm, Lines of code: 18, Source file: diet.py
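The stochastic part of the quantizer above floors y + U(0, 1), which rounds a value up with probability equal to its fractional part. A tiny illustration of that rounding behaviour (illustrative only, not part of diet.py):

import tensorflow as tf

y = tf.fill([100000], 2.3)
rounded = tf.floor(y + tf.random.uniform(tf.shape(y)))  # 3 with p=0.3, 2 with p=0.7
print(float(tf.reduce_mean(rounded)))                   # close to 2.3 on average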

Example 6: neural_gpu_body

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def neural_gpu_body(inputs, hparams, name=None):
  """The core Neural GPU."""
  with tf.variable_scope(name, "neural_gpu"):

    def step(state, inp):  # pylint: disable=missing-docstring
      x = tf.nn.dropout(state, 1.0 - hparams.dropout)
      for layer in range(hparams.num_hidden_layers):
        x = common_layers.conv_gru(
            x, (hparams.kernel_height, hparams.kernel_width),
            hparams.hidden_size,
            name="cgru_%d" % layer)
      # Padding input is zeroed-out in the modality; we check this by summing.
      padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
      new_state = tf.where(padding_inp, state, x)  # No-op where inp is padding.
      return new_state

    return tf.foldl(
        step,
        tf.transpose(inputs, [1, 0, 2, 3]),
        initializer=inputs,
        parallel_iterations=1,
        swap_memory=True) 
Developer: akzaidi, Project: fine-lm, Lines of code: 24, Source file: neural_gpu.py
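A standalone illustration of the padding check used inside step above (assuming TensorFlow 2.x eager execution; not part of neural_gpu.py): a batch element whose input is all zeros has sum(|inp|) == 0 and is therefore treated as padding.

import tensorflow as tf

inp = tf.constant([[[0.0, 0.0], [0.0, 0.0]],
                   [[0.3, -0.1], [0.2, 0.0]]])    # shape [batch=2, length=2, depth=2]
padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]), 0.00001)
print(padding_inp.numpy())                        # [ True False]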

Example 7: gated_linear_unit_layer

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = tf.layers.dense(x, depth * 2, activation=None)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
Developer: akzaidi, Project: fine-lm, Lines of code: 21, Source file: common_layers.py

Example 8: lp_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def lp_loss(gen_frames, gt_frames, l_num):
    """
    Calculates the sum of lp losses between the predicted and ground truth frames.

    @param gen_frames: The predicted frames at each scale.
    @param gt_frames: The ground truth frames at each scale.
    @param l_num: 1 or 2 for l1 and l2 loss, respectively.

    @return: The lp loss.
    """
    # calculate the loss for each scale
    scale_losses = []
    for i in xrange(len(gen_frames)):
        scale_losses.append(tf.reduce_sum(tf.abs(gen_frames[i] - gt_frames[i])**l_num))

    # condense into one tensor and avg
    return tf.reduce_mean(tf.pack(scale_losses)) 
Developer: dyelax, Project: Adversarial_Video_Generation, Lines of code: 19, Source file: loss_functions.py
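The snippet above targets Python 2 and a pre-1.0 TensorFlow API: xrange is range in Python 3, and tf.pack was renamed tf.stack. Under those assumptions, a minimal sketch of the same loss against the current API could look like this:

import tensorflow as tf

def lp_loss_modern(gen_frames, gt_frames, l_num):
    # Same computation as lp_loss above, but using range/zip and tf.stack.
    scale_losses = [tf.reduce_sum(tf.abs(g - t) ** l_num)
                    for g, t in zip(gen_frames, gt_frames)]
    return tf.reduce_mean(tf.stack(scale_losses))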

Example 9: GetItemPixels

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def GetItemPixels(self, I):
        '''
        Locates items that should be picked up on the screen
        '''
        ws = [8, 14]
        D1 = np.abs(I - np.array([10.8721,  12.8995,  13.9932])).sum(axis = 2) < 15
        D2 = np.abs(I - np.array([118.1302, 116.0938, 106.9063])).sum(axis = 2) < 76
        R1 = view_as_windows(D1, ws, ws).sum(axis = (2, 3))
        R2 = view_as_windows(D2, ws, ws).sum(axis = (2, 3))
        FR = ((R1 + R2 / np.prod(ws)) >= 1.0) & (R1 > 10) & (R2 > 10)
        PL = np.transpose(np.nonzero(FR)) * np.array(ws)
        if len(PL) <= 0:
            return []
        bc = Birch(threshold = 50, n_clusters = None)
        bc.fit(PL)
        return bc.subcluster_centers_ 
Developer: nicholastoddsmith, Project: poeai, Lines of code: 18, Source file: TargetingSystem.py

Example 10: _apply_sparse

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def _apply_sparse(self, grad, var):
        lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
        alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
        beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)

        eps = 1e-7  # cap for moving average

        m = self.get_slot(var, "m")
        m_slice = tf.gather(m, grad.indices)
        m_t = state_ops.scatter_update(m, grad.indices,
                                       tf.maximum(beta_t * m_slice + eps, tf.abs(grad.values)))
        m_t_slice = tf.gather(m_t, grad.indices)

        var_update = state_ops.scatter_sub(var, grad.indices, lr_t * grad.values * tf.exp(
            tf.log(alpha_t) * tf.sign(grad.values) * tf.sign(m_t_slice)))  # Update 'ref' by subtracting 'value'
        # Create an op that groups multiple operations.
        # When this op finishes, all ops in input have finished
        return control_flow_ops.group(*[var_update, m_t]) 
Developer: ChenglongChen, Project: tensorflow-XNN, Lines of code: 20, Source file: optimizer.py

Example 11: smooth_l1_regression_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def smooth_l1_regression_loss(scores, labels, thres=1.0, is_mean=True):
    # L1(x) = 0.5x^2 (|x|<thres)
    # L1(x) = |x|-0.5 (|x|>=thres)
    diff =  tf.abs(scores - labels)
    thres_mat = thres*tf.ones(diff.get_shape())
    # thres_mat = thres*tf.ones((40, 4))
    smooth_sign = tf.cast(tf.less(diff, thres_mat), tf.float32)

    smooth_opt1 = 0.5*tf.multiply(diff, diff)
    smooth_opt2 = diff-0.5

    loss_mat = tf.multiply(smooth_opt1, smooth_sign) + tf.multiply(smooth_opt2, (1.0-smooth_sign))

    if is_mean:
        loss = tf.reduce_mean(loss_mat)
    else:
        loss = loss_mat
    return loss 
Developer: kanchen-usc, Project: GroundeR, Lines of code: 20, Source file: loss.py

Example 12: generate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def generate(self, x, **kwargs):
        """
        Generate symbolic graph for adversarial examples and return.

        :param x: The model's symbolic inputs.
        :param eps: (optional float) attack step size (input variation)
        :param ord: (optional) Order of the norm (mimics NumPy).
                    Possible values: np.inf, 1 or 2.
        :param y: (optional) A tensor with the model labels. Only provide
                  this parameter if you'd like to use true labels when crafting
                  adversarial samples. Otherwise, model predictions are used as
                  labels to avoid the "label leaking" effect (explained in this
                  paper: https://arxiv.org/abs/1611.01236). Default is None.
                  Labels should be one-hot-encoded.
        :param y_target: (optional) A tensor with the labels to target. Leave
                         y_target=None if y is also set. Labels should be
                         one-hot-encoded.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        # Parse and save attack-specific parameters
        assert self.parse_params(**kwargs)

        from .attacks_tf import fgm

        labels, nb_classes = self.get_or_guess_labels(x, kwargs)

        return fgm(
            x,
            self.model.get_probs(x),
            y=labels,
            eps=self.eps,
            ord=self.ord,
            clip_min=self.clip_min,
            clip_max=self.clip_max,
            targeted=(self.y_target is not None)) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 38, Source file: attacks.py
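The ord parameter above determines how the perturbation is normalized inside fgm, and that normalization is a typical place where tf.abs appears. The sketch below illustrates the idea for a [batch, features] gradient; it is not CleverHans' exact fgm implementation.

import numpy as np
import tensorflow as tf

def normalize_perturbation(grad, ord_):
    # Illustrative only: map a gradient to a unit-norm attack direction per norm.
    if ord_ == np.inf:
        return tf.sign(grad)
    if ord_ == 1:
        return grad / tf.reduce_sum(tf.abs(grad), axis=1, keepdims=True)
    if ord_ == 2:
        return grad / tf.norm(grad, axis=1, keepdims=True)
    raise ValueError("ord must be np.inf, 1 or 2")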

Example 13: vatm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def vatm(model,
         x,
         logits,
         eps,
         back='tf',
         num_iterations=1,
         xi=1e-6,
         clip_min=None,
         clip_max=None):
    """
    A wrapper for the perturbation methods used for virtual adversarial
    training : https://arxiv.org/abs/1507.00677
    It calls the right function, depending on the
    user's backend.

    :param model: the model which returns the network unnormalized logits
    :param x: the input placeholder
    :param logits: the model's unnormalized output tensor
    :param eps: the epsilon (input variation parameter)
    :param num_iterations: the number of iterations
    :param xi: the finite difference parameter
    :param clip_min: optional parameter that can be used to set a minimum
                    value for components of the example returned
    :param clip_max: optional parameter that can be used to set a maximum
                    value for components of the example returned
    :return: a tensor for the adversarial example

    """
    assert back == 'tf'
    # Compute VATM using TensorFlow
    from .attacks_tf import vatm as vatm_tf
    return vatm_tf(
        model,
        x,
        logits,
        eps,
        num_iterations=num_iterations,
        xi=xi,
        clip_min=clip_min,
        clip_max=clip_max) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 42, Source file: attacks.py

Example 14: generate_adversarial_examples_np

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def generate_adversarial_examples_np(self, ord, eps, **kwargs):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=eps, ord=ord,
                                        clip_min=-5, clip_max=5, **kwargs)
        if ord == np.inf:
            delta = np.max(np.abs(x_adv - x_val), axis=1)
        elif ord == 1:
            delta = np.sum(np.abs(x_adv - x_val), axis=1)
        elif ord == 2:
            delta = np.sum(np.square(x_adv - x_val), axis=1)**.5

        return x_val, x_adv, delta 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 16, Source file: test_attacks.py

Example 15: test_generate_np_can_be_called_with_different_eps

# Required module: import tensorflow [as alias]
# Or: from tensorflow import abs [as alias]
def test_generate_np_can_be_called_with_different_eps(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        for eps in [0.1, 0.2, 0.3, 0.4]:
            x_adv = self.attack.generate_np(x_val, eps=eps, ord=np.inf,
                                            clip_min=-5.0, clip_max=5.0)

            delta = np.max(np.abs(x_adv - x_val), axis=1)
            self.assertClose(delta, eps) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 12, Source file: test_attacks.py


Note: The tensorflow.abs method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.