

Python ops.batch_position_sensitive_crop_regions Method Code Examples

This article collects typical usage examples of the Python method object_detection.utils.ops.batch_position_sensitive_crop_regions. If you are wondering what this method does, how to call it, or what real usage looks like, the curated code examples below should help. You can also explore further usage examples from the containing module, object_detection.utils.ops.


The following presents 7 code examples of ops.batch_position_sensitive_crop_regions, sorted by popularity.
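Before diving into the individual examples, here is a minimal sketch of the call pattern they all share. It assumes a TF 1.x environment with the TensorFlow Object Detection API on the Python path; the tensor shapes and argument order follow Example 1 below, so treat it as an illustrative sketch rather than canonical usage.

import tensorflow as tf
from object_detection.utils import ops

# A batch of 2 feature maps of size 3x3 with depth 4, plus 3 boxes per image
# in normalized [ymin, xmin, ymax, xmax] coordinates (shapes as in Example 1).
images = tf.random_uniform([2, 3, 3, 4])
boxes = tf.random_uniform((2, 3, 4))
crop_size = [2, 2]
num_spatial_bins = [1, 1]

# With a single spatial bin and global_pool=True, the per-box crops are
# average-pooled, giving a [batch, num_boxes, 1, 1, depth] tensor.
features = ops.batch_position_sensitive_crop_regions(
    images, boxes, crop_size, num_spatial_bins, global_pool=True)

with tf.Session() as sess:
  print(sess.run(features).shape)  # (2, 3, 1, 1, 4)

Examples 1 and 3 verify this single-bin case against a plain tf.image.crop_and_resize followed by average pooling, while Examples 5-7 exercise the position-sensitive path with num_spatial_bins = [2, 2] and global_pool=False.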

Example 1: test_position_sensitive_with_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((2, 3, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # When a single bin is used, position-sensitive crop and pool should be
    # the same as non-position sensitive crop and pool.
    crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind,
                                    crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
    crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

    ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
        image, boxes, crop_size, num_spatial_bins, global_pool=True)

    with self.test_session() as sess:
      expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
      self.assertAllClose(output, expected_output) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 24, Source: ops_test.py

Example 2: test_position_sensitive_with_global_pool_false_and_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [1, 1]

    images = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((2, 3, 4))
    # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # Since a single bin is used and crop_size = [1, 1] (i.e., no crop resize),
    # the outputs are the same regardless of the global_pool value.
    ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=True)
    ps_crop = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=False)

    with self.test_session() as sess:
      pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
      self.assertAllClose(pooled_output, unpooled_output) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 21, Source: ops_test.py

Example 3: test_position_sensitive_with_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [2, 2]

    def graph_fn():
      image = tf.random_uniform(image_shape)
      boxes = tf.random_uniform((2, 3, 4))
      box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

      # When a single bin is used, position-sensitive crop and pool should be
      # the same as non-position sensitive crop and pool.
      crop = tf.image.crop_and_resize(image,
                                      tf.reshape(boxes, [-1, 4]), box_ind,
                                      crop_size)
      crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True)
      crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4])

      ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
          image, boxes, crop_size, num_spatial_bins, global_pool=True)
      return crop_and_pool, ps_crop_and_pool

    # Crop and resize is not supported on TPUs.
    expected_output, output = self.execute_cpu(graph_fn, [])
    self.assertAllClose(output, expected_output) 
Developer: tensorflow, Project: models, Lines: 27, Source: ops_test.py

Example 4: test_position_sensitive_with_global_pool_false_and_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [1, 1]

    def graph_fn():
      images = tf.random_uniform(image_shape)
      boxes = tf.random_uniform((2, 3, 4))
      # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

      # Since a single bin is used and crop_size = [1, 1] (i.e., no crop resize),
      # the outputs are the same regardless of the global_pool value.
      ps_crop_and_pool = ops.batch_position_sensitive_crop_regions(
          images, boxes, crop_size, num_spatial_bins, global_pool=True)
      ps_crop = ops.batch_position_sensitive_crop_regions(
          images, boxes, crop_size, num_spatial_bins, global_pool=False)
      return ps_crop_and_pool, ps_crop

    pooled_output, unpooled_output = self.execute(graph_fn, [])
    self.assertAllClose(pooled_output, unpooled_output)


# The following tests are only executed on CPU because the output
# shape is not constant. 
Developer: tensorflow, Project: models, Lines: 26, Source: ops_test.py

Example 5: test_position_sensitive_with_global_pool_false_and_known_boxes

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
    num_spatial_bins = [2, 2]
    image_shape = [2, 2, 2, 4]
    crop_size = [2, 2]

    images = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                         shape=image_shape)

    # The first box contains the whole image, and the second box contains only the first row.
    boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                  [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
    # box_ind = tf.constant([0, 1], dtype=tf.int32)

    expected_output = []
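    # With num_spatial_bins = [2, 2] and depth 4, the depth is split into four
    # single-channel groups, one per bin; each expected value below is the
    # bilinear sample of that bin's channel at the center of the bin's sub-box.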

    # Expected output when the box contains the whole image.
    expected_output.append(
        np.reshape(np.array([[4, 7],
                             [10, 13]]),
                   (1, 2, 2, 1))
    )

    # Expected output when the box contains only the first row.
    expected_output.append(
        np.reshape(np.array([[3, 6],
                             [7, 10]]),
                   (1, 2, 2, 1))
    )
    expected_output = np.stack(expected_output, axis=0)

    ps_crop = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=False)

    with self.test_session() as sess:
      output = sess.run(ps_crop)
      self.assertAllEqual(output, expected_output) 
Developer: ahmetozlu, Project: vehicle_counting_tensorflow, Lines: 38, Source: ops_test.py

Example 6: test_position_sensitive_with_global_pool_false_and_known_boxes

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
    num_spatial_bins = [2, 2]
    image_shape = [2, 2, 2, 4]
    crop_size = [2, 2]

    images = tf.constant(
        list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32, shape=image_shape)

    # The first box contains the whole image, and the second box contains only the first row.
    boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                  [[0., 0., 0.5, 1.]]]), dtype=tf.float32)
    # box_ind = tf.constant([0, 1], dtype=tf.int32)

    expected_output = []

    # Expected output when the box contains the whole image.
    expected_output.append(
        np.reshape(np.array([[4, 7],
                             [10, 13]]),
                   (1, 2, 2, 1))
    )

    # Expected output when the box contains only the first row.
    expected_output.append(
        np.reshape(np.array([[3, 6],
                             [7, 10]]),
                   (1, 2, 2, 1))
    )
    expected_output = np.stack(expected_output, axis=0)

    ps_crop = ops.batch_position_sensitive_crop_regions(
        images, boxes, crop_size, num_spatial_bins, global_pool=False)

    with self.test_session() as sess:
      output = sess.run(ps_crop)
      self.assertAllEqual(output, expected_output) 
Developer: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 38, Source: ops_test.py

Example 7: test_position_sensitive_with_global_pool_false_and_known_boxes

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import batch_position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
    num_spatial_bins = [2, 2]
    image_shape = [2, 2, 2, 4]
    crop_size = [2, 2]

    # box_ind = tf.constant([0, 1], dtype=tf.int32)

    expected_output = []

    # Expected output when the box contains the whole image.
    expected_output.append(
        np.reshape(np.array([[4, 7],
                             [10, 13]]),
                   (1, 2, 2, 1))
    )

    # Expected output when the box contains only the first row.
    expected_output.append(
        np.reshape(np.array([[3, 6],
                             [7, 10]]),
                   (1, 2, 2, 1))
    )
    expected_output = np.stack(expected_output, axis=0)

    def graph_fn():
      images = tf.constant(
          list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
          shape=image_shape)

      # The first box contains the whole image, and the second box contains only the first row.
      boxes = tf.constant(np.array([[[0., 0., 1., 1.]],
                                    [[0., 0., 0.5, 1.]]]), dtype=tf.float32)

      ps_crop = ops.batch_position_sensitive_crop_regions(
          images, boxes, crop_size, num_spatial_bins, global_pool=False)
      return ps_crop

    output = self.execute(graph_fn, [])
    self.assertAllEqual(output, expected_output) 
Developer: tensorflow, Project: models, Lines: 41, Source: ops_test.py


Note: The object_detection.utils.ops.batch_position_sensitive_crop_regions examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please follow the corresponding project's license when distributing or using it. Do not reproduce this article without permission.