

Python v1.subtract Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.subtract method in Python. If you are wondering how v1.subtract is called in practice or what it is used for, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.subtract method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
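
Before the collected examples, here is a minimal, self-contained sketch of calling tf.compat.v1.subtract directly. The tensor values are made up for illustration, and the sketch assumes TensorFlow 2.x with the v1 compatibility API running in graph mode:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

a = tf.constant([[1.0, 2.0, 3.0]])
b = tf.constant(0.5)
diff = tf.subtract(a, b)  # element-wise a - b; the scalar b is broadcast to a's shape

with tf.Session() as sess:
    print(sess.run(diff))  # [[0.5 1.5 2.5]]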

Example 1: _flip_boxes_left_right

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _flip_boxes_left_right(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
  return flipped_boxes 
Author: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 19, Source file: preprocessor.py
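
A hypothetical quick check of the function above (the box values are made up, not taken from the project): flipping [ymin, xmin, ymax, xmax] = [0.1, 0.2, 0.4, 0.6] left-right should give xmin' = 1 - 0.6 = 0.4 and xmax' = 1 - 0.2 = 0.8.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

boxes = tf.constant([[0.1, 0.2, 0.4, 0.6]], dtype=tf.float32)
flipped = _flip_boxes_left_right(boxes)
with tf.Session() as sess:
    print(sess.run(flipped))  # approximately [[0.1 0.4 0.4 0.8]]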

Example 2: _get_cost_function

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _get_cost_function(self):
        """Compute the cost of the Mittens objective function.

        If self.mittens = 0, this is the same as the cost of GloVe.
        """
        self.weights = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.log_coincidence = tf.placeholder(
            tf.float32, shape=[self.n_words, self.n_words])
        self.diffs = tf.subtract(self.model, self.log_coincidence)
        cost = tf.reduce_sum(
            0.5 * tf.multiply(self.weights, tf.square(self.diffs)))
        if self.mittens > 0:
            self.mittens = tf.constant(self.mittens, tf.float32)
            cost += self.mittens * tf.reduce_sum(
                tf.multiply(
                    self.has_embedding,
                    self._tf_squared_euclidean(
                        tf.add(self.W, self.C),
                        self.original_embedding)))
        tf.summary.scalar("cost", cost)
        return cost 
Author: roamanalytics, Project: mittens, Lines of code: 24, Source file: tf_mittens.py
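
For reference, the cost built above is the weighted GloVe objective plus the Mittens retrofitting penalty. Written out with notation chosen here (not taken from the repository), where m_ij is the model prediction, log X_ij the log co-occurrence, w_ij the GloVe weighting, mu the mittens coefficient, r_i the pretrained embedding, and R the set of words that have one:

J = \sum_{i,j} \tfrac{1}{2} \, w_{ij} \left( m_{ij} - \log X_{ij} \right)^2 \;+\; \mu \sum_{i \in R} \lVert (W_i + C_i) - r_i \rVert_2^2

When self.mittens = 0 the second term vanishes and the cost reduces to plain weighted GloVe.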

Example 3: _flip_boxes_left_right

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _flip_boxes_left_right(boxes):
  """Left-right flip the boxes.

  Args:
    boxes: Float32 tensor containing the bounding boxes -> [..., 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           The last dimension is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=-1)
  flipped_xmin = tf.subtract(1.0, xmax)
  flipped_xmax = tf.subtract(1.0, xmin)
  flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], axis=-1)
  return flipped_boxes 
Author: tensorflow, Project: models, Lines of code: 19, Source file: preprocessor.py

Example 4: _flip_boxes_up_down

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _flip_boxes_up_down(boxes):
  """Up-down flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form meaning their coordinates vary
           between [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
  ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
  flipped_ymin = tf.subtract(1.0, ymax)
  flipped_ymax = tf.subtract(1.0, ymin)
  flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)
  return flipped_boxes 
Author: tensorflow, Project: models, Lines of code: 19, Source file: preprocessor.py

Example 5: test_forward_multi_input

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def test_forward_multi_input():
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name='in1')
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name='in2')
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name='in3')
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name='in4')

        out1 = tf.add(in1, in2, name='out1')
        out2 = tf.subtract(in3, in4, name='out2')
        out = tf.multiply(out1, out2, name='out')
        in_data = np.arange(9, dtype='int32').reshape([3, 3])

        compare_tf_with_tvm([in_data, in_data, in_data, in_data],
                            ['in1:0', 'in2:0', 'in3:0', 'in4:0'], 'out:0')

Author: apache, Project: incubator-tvm, Lines of code: 20, Source file: test_forward.py

Example 6: normalized_image

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def normalized_image(images):
  # Rescale from [0, 255] to [0, 2]
  images = tf.multiply(images, 1. / 127.5)
  # Rescale to [-1, 1]
  mlperf.logger.log(key=mlperf.tags.INPUT_MEAN_SUBTRACTION, value=[1.0] * 3)
  return tf.subtract(images, 1.0) 
Author: tensorflow, Project: benchmarks, Lines of code: 8, Source file: preprocessing.py

Example 7: compute_lengths

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def compute_lengths(symbols_list, eos_symbol, name=None,
                    dtype=tf.int64):
  """Computes sequence lengths given end-of-sequence symbol.

  Args:
    symbols_list: list of [batch_size] tensors of symbols (e.g. integers).
    eos_symbol: end of sequence symbol (e.g. integer).
    name: name for the name scope of this op.
    dtype: type of symbols, default: tf.int64.

  Returns:
    Tensor [batch_size] of lengths of sequences.
  """
  with tf.name_scope(name, 'compute_lengths'):
    max_len = len(symbols_list)
    eos_symbol_ = tf.constant(eos_symbol, dtype=dtype)
    # Array holding max_len - i at positions where the symbol equals EOS and 0
    # otherwise; its maximum over time locates the first EOS in each example.
    ends = [tf.constant(max_len - i, dtype=tf.int64)
            * tf.to_int64(tf.equal(s, eos_symbol_))
            for i, s in enumerate(symbols_list)]
    # Lengths of sequences, or max_len for sequences that didn't have EOS.
    # Note: examples that don't have EOS will have max value of 0 and value of
    # max_len+1 in lens_.
    lens_ = max_len + 1 - tf.reduce_max(tf.stack(ends, 1), axis=1)
    # For examples that didn't have EOS decrease max_len+1 to max_len as the
    # length.
    lens = tf.subtract(lens_, tf.to_int64(tf.equal(lens_, max_len + 1)))
    return tf.stop_gradient(tf.reshape(lens, [-1])) 
Author: deepmind, Project: lamb, Lines of code: 31, Source file: utils.py
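
A hypothetical toy check of compute_lengths (the symbol values are invented for illustration): a batch of two sequences unrolled over four time steps, with 0 as the end-of-sequence symbol. The returned length counts symbols up to and including the first EOS; sequences with no EOS get max_len.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Time-major list: example 0 is [5, 0, 0, 0], example 1 is [7, 8, 0, 3].
symbols = [tf.constant([5, 7], dtype=tf.int64),
           tf.constant([0, 8], dtype=tf.int64),
           tf.constant([0, 0], dtype=tf.int64),
           tf.constant([0, 3], dtype=tf.int64)]
lengths = compute_lengths(symbols, eos_symbol=0)
with tf.Session() as sess:
    print(sess.run(lengths))  # [2 3]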

Example 8: compute_area_features

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def compute_area_features(features, max_area_width, max_area_height=1, height=1,
                          epsilon=1e-6):
  """Computes features for each area.

  Args:
    features: a Tensor in a shape of [batch_size, height * width, depth].
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    height: the height of the image.
    epsilon: the epsilon added to the variance for computing standard deviation.
  Returns:
    area_mean: A Tensor of shape [batch_size, num_areas, depth]
    area_std: A Tensor of shape [batch_size, num_areas, depth]
    area_sum: A Tensor of shape [batch_size, num_areas, depth]
    area_heights: A Tensor of shape [batch_size, num_areas, 1]
    area_widths: A Tensor of shape [batch_size, num_areas, 1]
  """
  with tf.name_scope("compute_area_features"):
    tf.logging.info("area_attention compute_area_features: %d x %d",
                    max_area_height, max_area_width)
    area_sum, area_heights, area_widths = _compute_sum_image(
        features, max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    area_squared_sum, _, _ = _compute_sum_image(
        tf.pow(features, 2), max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    sizes = tf.multiply(area_heights, area_widths)
    float_area_sizes = tf.to_float(sizes)
    area_mean = tf.div(area_sum, float_area_sizes)
    s2_n = tf.div(area_squared_sum, float_area_sizes)
    area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2))
    area_std = tf.sqrt(tf.abs(area_variance) + epsilon)
    return area_mean, area_std, area_sum, area_heights, area_widths 
Author: tensorflow, Project: tensor2tensor, Lines of code: 35, Source file: area_attention.py
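
The standard deviation above relies on the identity Var(x) = E[x^2] - (E[x])^2, with abs() and a small epsilon guarding against tiny negative values from floating-point round-off. A NumPy sketch of the same per-area computation, using made-up feature values:

import numpy as np

x = np.array([1.0, 2.0, 4.0])          # hypothetical feature values pooled over one area
area_mean = x.sum() / x.size           # E[x]
s2_n = (x ** 2).sum() / x.size         # E[x^2]
area_variance = s2_n - area_mean ** 2  # E[x^2] - (E[x])^2
area_std = np.sqrt(abs(area_variance) + 1e-6)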

Example 9: _mean_image_subtraction

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError("Input must be of size [height, width, C>0]")
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError("len(means) must match the number of channels")

  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  for i in range(num_channels):
    channels[i] -= means[i]
  return tf.concat(axis=2, values=channels) 
Author: tensorflow, Project: tensor2tensor, Lines of code: 33, Source file: vqa_utils.py
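
For comparison, the same centering can be written as a single broadcasted tf.subtract instead of split/concat; a minimal sketch, assuming an RGB image tensor of shape [height, width, 3]:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

image = tf.placeholder(tf.float32, shape=[None, None, 3])
means = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32)
centered = tf.subtract(image, means)  # the [3] vector broadcasts over [height, width, 3]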

Example 10: vqa_v2_preprocess_image

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def vqa_v2_preprocess_image(
    image,
    height,
    width,
    mode,
    resize_side=512,
    distort=True,
    image_model_fn="resnet_v1_152",
):
  """vqa v2 preprocess image."""

  image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  assert resize_side > 0
  if resize_side:
    image = _aspect_preserving_resize(image, resize_side)
  if mode == tf.estimator.ModeKeys.TRAIN:
    image = tf.random_crop(image, [height, width, 3])
  else:
    # Central crop, assuming resize_height > height, resize_width > width.
    image = tf.image.resize_image_with_crop_or_pad(image, height, width)

  image = tf.clip_by_value(image, 0.0, 1.0)

  if mode == tf.estimator.ModeKeys.TRAIN and distort:
    image = _flip(image)
    num_distort_cases = 4
    # pylint: disable=unnecessary-lambda
    image = _apply_with_random_selector(
        image, lambda x, ordering: _distort_color(x, ordering),
        num_cases=num_distort_cases)

  if image_model_fn.startswith("resnet_v1"):
    # resnet_v1 uses vgg preprocessing
    image = image * 255.
    image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
  elif image_model_fn.startswith("resnet_v2"):
    # resnet v2 uses inception preprocessing
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)

  return image 
Author: tensorflow, Project: tensor2tensor, Lines of code: 43, Source file: vqa_utils.py

Example 11: testSubIntervalBounds

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def testSubIntervalBounds(self):
    m = tf.subtract
    z = tf.constant([[-2, 3, 0]], dtype=tf.float32)
    m = ibp.PiecewiseMonotonicWrapper(m)
    input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
    output_bounds = m.propagate_bounds(input_bounds, input_bounds)
    with self.test_session() as sess:
      l, u = sess.run([output_bounds.lower, output_bounds.upper])
      self.assertAlmostEqual([[-2., -2., -2.]], l.tolist())
      self.assertAlmostEqual([[2., 2., 2.]], u.tolist()) 
Author: deepmind, Project: interval-bound-propagation, Lines of code: 12, Source file: bounds_test.py
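
The expected bounds in this test follow the interval rule for subtraction: lower = x_lo - y_hi and upper = x_hi - y_lo. A NumPy sketch (not part of the library) reproducing the numbers the test asserts:

import numpy as np

z = np.array([[-2.0, 3.0, 0.0]])
x_lo, x_hi = z - 1.0, z + 1.0   # bounds on the first operand
y_lo, y_hi = z - 1.0, z + 1.0   # bounds on the second operand
lower = x_lo - y_hi             # [[-2. -2. -2.]]
upper = x_hi - y_lo             # [[ 2.  2.  2.]]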

Example 12: _tf_squared_euclidean

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def _tf_squared_euclidean(X, Y):
        """Squared Euclidean distance between the rows of `X` and `Y`.
        """
        return tf.reduce_sum(tf.pow(tf.subtract(X, Y), 2), axis=1) 
Author: roamanalytics, Project: mittens, Lines of code: 6, Source file: tf_mittens.py

Example 13: add_jpeg_decoding

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
                      input_std):
  """Adds operations that perform JPEG decoding and resizing to the graph..

  Args:
    input_width: Desired width of the image fed into the recognizer graph.
    input_height: Desired height of the image fed into the recognizer graph.
    input_depth: Desired channels of the image fed into the recognizer graph.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.

  Returns:
    Tensors for the node to feed JPEG data into, and the output of the
      preprocessing steps.
  """
  jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  resize_shape = tf.stack([input_height, input_width])
  resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
  resized_image = tf.image.resize_bilinear(decoded_image_4d,
                                           resize_shape_as_int)
  offset_image = tf.subtract(resized_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  return jpeg_data, mul_image 
Author: iamvishnuks, Project: AudioNet, Lines of code: 28, Source file: retrain.py
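
A hypothetical way to run the graph built above (the file path, size, mean, and std values are assumptions for illustration, not taken from retrain.py):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

jpeg_data_tensor, preprocessed = add_jpeg_decoding(
    input_width=224, input_height=224, input_depth=3,
    input_mean=127.5, input_std=127.5)
with tf.Session() as sess:
    image_bytes = tf.gfile.GFile('example.jpg', 'rb').read()  # hypothetical image path
    pixels = sess.run(preprocessed, feed_dict={jpeg_data_tensor: image_bytes})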

Example 14: preprocess_image

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def preprocess_image(image,
                     output_height,
                     output_width,
                     is_training,
                     use_grayscale=False):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.
    use_grayscale: Whether to convert the image from RGB to grayscale.

  Returns:
    A preprocessed image.
  """
  del is_training  # Unused argument
  image = tf.to_float(image)
  if use_grayscale:
    image = tf.image.rgb_to_grayscale(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Author: tensorflow, Project: models, Lines of code: 29, Source file: lenet_preprocessing.py

Example 15: normalize_image

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import subtract [as alias]
def normalize_image(image, original_minval, original_maxval, target_minval,
                    target_maxval):
  """Normalizes pixel values in the image.

  Moves the pixel values from the current [original_minval, original_maxval]
  range to the [target_minval, target_maxval] range.

  Args:
    image: rank 3 float32 tensor containing 1
           image -> [height, width, channels].
    original_minval: current image minimum value.
    original_maxval: current image maximum value.
    target_minval: target image minimum value.
    target_maxval: target image maximum value.

  Returns:
    image: image which is the same shape as input image.
  """
  with tf.name_scope('NormalizeImage', values=[image]):
    original_minval = float(original_minval)
    original_maxval = float(original_maxval)
    target_minval = float(target_minval)
    target_maxval = float(target_maxval)
    image = tf.cast(image, dtype=tf.float32)
    image = tf.subtract(image, original_minval)
    image = tf.multiply(image, (target_maxval - target_minval) /
                        (original_maxval - original_minval))
    image = tf.add(image, target_minval)
    return image 
Author: tensorflow, Project: models, Lines of code: 31, Source file: preprocessor.py
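
A hypothetical usage of normalize_image, mapping 8-bit pixel values to [-1, 1] (the ranges are an assumption for illustration): pixel 0 maps to (0 - 0) * (2/255) + (-1) = -1 and pixel 255 maps to (255 - 0) * (2/255) + (-1) = 1.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

image = tf.placeholder(tf.uint8, shape=[None, None, 3])
normalized = normalize_image(image, original_minval=0, original_maxval=255,
                             target_minval=-1.0, target_maxval=1.0)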


Note: The tensorflow.compat.v1.subtract examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.