

Python tensorflow.maximum Method Code Examples

This article collects typical usage examples of the tensorflow.maximum method in Python. If you are wondering what tensorflow.maximum does, how to call it, or what real-world uses of it look like, the curated code examples below should help. You can also explore further usage examples from the tensorflow package.


The following presents 15 code examples of the tensorflow.maximum method, sorted by popularity by default.
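
Before walking through the examples, here is a minimal, self-contained sketch of what tf.maximum itself does: it returns the element-wise maximum of two tensors with NumPy-style broadcasting, which is the pattern the clipping and leaky-ReLU examples below rely on. The tensor values are purely illustrative.

import tensorflow as tf

x = tf.constant([-1.0, 0.5, 2.0])

# Broadcasting against a scalar: every element is clamped to be at least 0.0 (a ReLU).
clamped = tf.maximum(x, 0.0)                              # [0.0, 0.5, 2.0]

# Element-wise maximum of two tensors of the same shape.
pairwise = tf.maximum(x, tf.constant([0.0, 1.0, 1.5]))    # [0.0, 1.0, 2.0]

# In the TF1-style graphs used below, evaluate the results inside a session:
# with tf.Session() as sess:
#     print(sess.run([clamped, pairwise]))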

Example 1: apply_perturbations

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
# Note: this example also uses NumPy, so it additionally requires: import numpy as np
def apply_perturbations(i, j, X, increase, theta, clip_min, clip_max):
    """
    TensorFlow implementation for apply perturbations to input features based
    on salency maps
    :param i: index of first selected feature
    :param j: index of second selected feature
    :param X: a matrix containing our input features for our sample
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :param theta: delta for each feature adjustment
    :param clip_min: mininum value for a feature in our sample
    :param clip_max: maximum value for a feature in our sample
    : return: a perturbed input feature matrix for a target class
    """

    # perturb our input sample
    if increase:
        X[0, i] = np.minimum(clip_max, X[0, i] + theta)
        X[0, j] = np.minimum(clip_max, X[0, j] + theta)
    else:
        X[0, i] = np.maximum(clip_min, X[0, i] - theta)
        X[0, j] = np.maximum(clip_min, X[0, j] - theta)

    return X 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: attacks_tf.py
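
A brief usage sketch for apply_perturbations, assuming a single flattened sample with features in [0, 1]; the indices and theta value are illustrative only.

import numpy as np

X = np.array([[0.2, 0.5, 0.9]])
X_adv = apply_perturbations(i=0, j=2, X=X, increase=True,
                            theta=0.3, clip_min=0.0, clip_max=1.0)
# X is modified in place: X_adv[0, 0] == 0.5 and X_adv[0, 2] == 1.0 (clipped at clip_max).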

Example 2: pitch_shift

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def pitch_shift(
        spectrogram,
        semitone_shift=0.0,
        method=tf.image.ResizeMethod.BILINEAR):
    """ Pitch shift a spectrogram preserving shape in tensorflow. Note that
    this is an approximation in the frequency domain.

    :param spectrogram: Input spectrogram to be pitch shifted as tensor.
    :param semitone_shift: (Optional) Pitch shift in semitones, defaults to 0.0.
    :param method: (Optional) Interpolation method, defaults to BILINEAR.
    :returns: Pitch shifted spectrogram (same shape as spectrogram).
    """
    factor = 2 ** (semitone_shift / 12.)
    T = tf.shape(spectrogram)[0]
    F = tf.shape(spectrogram)[1]
    F_ps = tf.cast(tf.cast(F, tf.float32) * factor, tf.int32)[0]
    ps_spec = tf.image.resize_images(
        spectrogram,
        [T, F_ps],
        method=method,
        align_corners=True)
    paddings = [[0, 0], [0, tf.maximum(0, F - F_ps)], [0, 0]]
    return tf.pad(ps_spec[:, :F, :], paddings, 'CONSTANT') 
Developer: deezer, Project: spleeter, Lines: 25, Source: spectrogram.py

Example 3: _add_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def _add_train_op(self):
    """Sets self._train_op, op to run for training."""
    hps = self._hps

    self._lr_rate = tf.maximum(
        hps.min_lr,  # min_lr_rate.
        tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), hps.max_grad_norm)
    tf.summary.scalar('global_norm', global_norm)
    optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
    tf.summary.scalar('learning rate', self._lr_rate)
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=self.global_step, name='train_step') 
Developer: ringringyi, Project: DOTA_models, Lines: 19, Source: seq2seq_attention_model.py

Example 4: clip_to_window

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def clip_to_window(keypoints, window, scope=None):
  """Clips keypoints to a window.

  This op clips any input keypoints to a window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window to which the op should clip the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'ClipToWindow'):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    y = tf.maximum(tf.minimum(y, win_y_max), win_y_min)
    x = tf.maximum(tf.minimum(x, win_x_max), win_x_min)
    new_keypoints = tf.concat([y, x], 2)
    return new_keypoints 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: keypoint_ops.py
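
A brief usage sketch for clip_to_window, assuming normalized keypoint coordinates and a unit window; the values are illustrative only.

import tensorflow as tf

keypoints = tf.constant([[[-0.2, 0.1], [0.9, 1.3]]])  # [1 instance, 2 keypoints, (y, x)]
window = tf.constant([0.0, 0.0, 1.0, 1.0])            # [y_min, x_min, y_max, x_max]
clipped = clip_to_window(keypoints, window)           # y and x are clamped into [0.0, 1.0]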

Example 5: intersection

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def intersection(boxlist1, boxlist2, scope=None):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxlist1: BoxList holding N boxes
    boxlist2: BoxList holding M boxes
    scope: name scope.

  Returns:
    a tensor with shape [N, M] representing pairwise intersections
  """
  with tf.name_scope(scope, 'Intersection'):
    y_min1, x_min1, y_max1, x_max1 = tf.split(
        value=boxlist1.get(), num_or_size_splits=4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(
        value=boxlist2.get(), num_or_size_splits=4, axis=1)
    all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
    all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
    intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
    all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
    all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
    intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Developer: ringringyi, Project: DOTA_models, Lines: 25, Source: box_list_ops.py
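
As a usage note, pairwise intersection areas like the one above are usually combined with per-box areas to compute IoU. Below is a minimal, self-contained sketch operating on plain [y_min, x_min, y_max, x_max] tensors rather than the project's BoxList wrapper; the helper name pairwise_iou is hypothetical.

import tensorflow as tf

def pairwise_iou(boxes1, boxes2):
    """Pairwise IoU for boxes given as [y_min, x_min, y_max, x_max] rows."""
    y_min1, x_min1, y_max1, x_max1 = tf.split(boxes1, 4, axis=1)
    y_min2, x_min2, y_max2, x_max2 = tf.split(boxes2, 4, axis=1)
    # Pairwise intersection areas, computed exactly as in Example 5.
    heights = tf.maximum(0.0, tf.minimum(y_max1, tf.transpose(y_max2)) -
                         tf.maximum(y_min1, tf.transpose(y_min2)))
    widths = tf.maximum(0.0, tf.minimum(x_max1, tf.transpose(x_max2)) -
                        tf.maximum(x_min1, tf.transpose(x_min2)))
    intersections = heights * widths                        # [N, M]
    areas1 = (y_max1 - y_min1) * (x_max1 - x_min1)          # [N, 1]
    areas2 = (y_max2 - y_min2) * (x_max2 - x_min2)          # [M, 1]
    unions = areas1 + tf.transpose(areas2) - intersections  # [N, M]
    # tf.maximum also guards the division against degenerate (zero-area) pairs.
    return intersections / tf.maximum(unions, 1e-8)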

Example 6: memory_run

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def memory_run(step, nmaps, mem_size, batch_size, vocab_size,
               global_step, do_training, update_mem, decay_factor, num_gpus,
               target_emb_weights, output_w, gpu_targets_tn, it):
  """Run memory."""
  q = step[:, 0, it, :]
  mlabels = gpu_targets_tn[:, it, 0]
  res, mask, mem_loss = memory_call(
      q, mlabels, nmaps, mem_size, vocab_size, num_gpus, update_mem)
  res = tf.gather(target_emb_weights, res) * tf.expand_dims(mask[:, 0], 1)

  # Mix gold and original in the first steps, 20% later.
  gold = tf.nn.dropout(tf.gather(target_emb_weights, mlabels), 0.7)
  use_gold = 1.0 - tf.cast(global_step, tf.float32) / (1000. * decay_factor)
  use_gold = tf.maximum(use_gold, 0.2) * do_training
  mem = tf.cond(tf.less(tf.random_uniform([]), use_gold),
                lambda: use_gold * gold + (1.0 - use_gold) * res,
                lambda: res)
  mem = tf.reshape(mem, [-1, 1, 1, nmaps])
  return mem, mem_loss, update_mem 
Developer: ringringyi, Project: DOTA_models, Lines: 21, Source: neural_gpu.py

Example 7: simulate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros_like(self._observ),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          # TODO(piotrmilos): possibly ignore envs with done
          r0 = tf.maximum(a[0], self._batch_env.observ)
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)

          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      simulate_ret = [ret[-1, ...] for ret in simulate_ret]

      with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
        return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2]) 
Developer: akzaidi, Project: fine-lm, Lines: 24, Source: tf_atari_wrappers.py

Example 8: average_sharded_losses

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
      can be a single Tensor or a 2-tuple (numerator and denominator).

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  losses = {}
  for loss_name in sorted(sharded_losses[0]):
    all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
    if isinstance(all_shards[0], tuple):
      sharded_num, sharded_den = zip(*all_shards)
      mean_loss = (
          tf.add_n(sharded_num) / tf.maximum(
              tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
    else:
      mean_loss = tf.reduce_mean(all_shards)

    losses[loss_name] = mean_loss
  return losses 
Developer: akzaidi, Project: fine-lm, Lines: 25, Source: t2t_model.py
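
A brief usage sketch for average_sharded_losses with two hypothetical data shards whose 'training' loss is reported as a (numerator, denominator) pair; the constants are illustrative.

import tensorflow as tf

sharded_losses = [
    {"training": (tf.constant(3.0), tf.constant(2.0))},  # shard 0: loss sum 3.0 over 2 examples
    {"training": (tf.constant(1.0), tf.constant(2.0))},  # shard 1: loss sum 1.0 over 2 examples
]
losses = average_sharded_losses(sharded_losses)
# losses["training"] evaluates to (3.0 + 1.0) / max(1.0, 2.0 + 2.0) = 1.0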

Example 9: rank_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Developer: akzaidi, Project: fine-lm, Lines: 20, Source: slicenet.py

Example 10: forward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def forward(self):
		self.out = tf.maximum(
			.1 * self.inp.out, 
			self.inp.out, 
			name = self.scope
		) 
Developer: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 8, Source: simple.py

Example 11: lrelu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def lrelu(inputs, alpha=0.2):
  return tf.maximum(alpha * inputs, inputs) 
Developer: acheketa, Project: cwavegan, Lines: 4, Source: tpu_model.py
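
A brief usage note: for alpha < 1, tf.maximum(alpha * inputs, inputs) returns inputs where it is non-negative and alpha * inputs where it is negative, which is exactly a leaky ReLU. The values below are illustrative.

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
y = lrelu(x, alpha=0.2)   # evaluates to [-0.4, 0.0, 3.0]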

Example 12: apply_phaseshuffle

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def apply_phaseshuffle(x, rad, pad_type='reflect'):
  b, x_len, nch = x.get_shape().as_list()

  phase = tf.random_uniform([], minval=-rad, maxval=rad + 1, dtype=tf.int32)
  pad_l = tf.maximum(phase, 0)
  pad_r = tf.maximum(-phase, 0)
  phase_start = pad_r
  x = tf.pad(x, [[0, 0], [pad_l, pad_r], [0, 0]], mode=pad_type)

  x = x[:, phase_start:phase_start+x_len]
  x.set_shape([b, x_len, nch])

  return x 
Developer: acheketa, Project: cwavegan, Lines: 15, Source: tpu_model.py
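
A brief usage sketch for apply_phaseshuffle, assuming a WaveGAN-style waveform batch of shape [batch, samples, channels] with a fully known static shape; the sizes are illustrative.

import tensorflow as tf

x = tf.random_uniform([8, 16384, 1])      # [batch, samples, channels]
shuffled = apply_phaseshuffle(x, rad=2)   # applies one random shift of up to 2 samples to the batch
# The output keeps the input shape; out-of-range samples come from reflection padding.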

Example 13: leaky_relu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def leaky_relu(x, alpha=0.2):
    with tf.name_scope('LeakyRelu'):
        alpha = tf.constant(alpha, dtype=x.dtype, name='alpha')
        return tf.maximum(x * alpha, x)

#----------------------------------------------------------------------------
# Nearest-neighbor upscaling layer. 
Developer: zalandoresearch, Project: disentangling_conditional_gans, Lines: 9, Source: networks.py

Example 14: getLoss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def getLoss(trueCosSim, falseCosSim, margin):
        zero = tf.fill(tf.shape(trueCosSim), 0.0)
        tfMargin = tf.fill(tf.shape(trueCosSim), margin)
        with tf.name_scope("loss"):
            losses = tf.maximum(zero, tf.subtract(tfMargin, tf.subtract(trueCosSim, falseCosSim)))
            loss = tf.reduce_sum(losses)
        return loss 
Developer: shuaihuaiyi, Project: QA, Lines: 9, Source: qaLSTMNet.py

Example 15: generate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import maximum [as alias]
def generate(self, x, **kwargs):
        """
        Return a tensor that constructs adversarial examples for the given
        input. Generate uses tf.py_func in order to operate over tensors.

        :param x: (required) A tensor with the inputs.
        :param y_target: (required) A tensor with the one-hot target labels.
        :param batch_size: The number of inputs to include in a batch and
                           process simultaneously.
        :param binary_search_steps: The number of times we perform binary
                                    search to find the optimal tradeoff
                                    constant between the norm of the
                                    perturbation and the cross-entropy loss
                                    of the classification.
        :param max_iterations: The maximum number of iterations.
        :param initial_const: The initial tradeoff-constant to use to tune the
                              relative importance of size of the perturbation
                              and cross-entropy loss of the classification.
        :param clip_min: (optional float) Minimum input component value
        :param clip_max: (optional float) Maximum input component value
        """
        import tensorflow as tf
        from .attacks_tf import LBFGS_attack
        self.parse_params(**kwargs)

        _, nb_classes = self.get_or_guess_labels(x, kwargs)

        attack = LBFGS_attack(
            self.sess, x, self.model.get_probs(x), self.y_target,
            self.binary_search_steps, self.max_iterations, self.initial_const,
            self.clip_min, self.clip_max, nb_classes, self.batch_size)

        def lbfgs_wrap(x_val, y_val):
            return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)

        wrap = tf.py_func(lbfgs_wrap, [x, self.y_target], self.tf_dtype)
        wrap.set_shape(x.get_shape())

        return wrap 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 40, Source: attacks.py


Note: The tensorflow.maximum examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project licenses. Please do not reproduce without permission.