

Python ops.position_sensitive_crop_regions Method Code Examples

This article collects typical usage examples of the Python method object_detection.utils.ops.position_sensitive_crop_regions. If you are wondering how to call ops.position_sensitive_crop_regions, or what working examples of it look like, the curated code examples below may help. You can also browse further usage examples for the containing module, object_detection.utils.ops.


The following presents 10 code examples of the ops.position_sensitive_crop_regions method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
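
Before the test snippets, here is a minimal, standalone sketch of a typical call. It is not taken from the examples on this page, and it assumes the TF1-era signature used by most of them (a 4-D image tensor plus a box_ind argument); Example 4 below shows a newer variant that takes a single 3-D image and drops box_ind.

# Minimal usage sketch (assumed TF1-style API; not from the examples below).
import tensorflow as tf
from object_detection.utils import ops

# Feature map with 6 channels: one score map per spatial bin (3 x 2 bins).
image = tf.random_uniform([1, 6, 4, 6])

# Two boxes in normalized [ymin, xmin, ymax, xmax] coordinates, both cropped
# from batch element 0.
boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.0, 0.5, 0.5]], dtype=tf.float32)
box_ind = tf.constant([0, 0], dtype=tf.int32)

# crop_size must be divisible by num_spatial_bins in each dimension, and the
# channel count must be divisible by the total number of bins (here 3 * 2 = 6).
features = ops.position_sensitive_crop_regions(
    image, boxes, box_ind,
    crop_size=[3, 2],
    num_spatial_bins=[3, 2],
    global_pool=True)

with tf.Session() as sess:
    # With global_pool=True the output is [num_boxes, 1, 1, channels / bins].
    print(sess.run(features).shape)  # (2, 1, 1, 1)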

Example 1: test_position_sensitive

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive(self):
    num_spatial_bins = [3, 2]
    image_shape = [1, 3, 2, 6]

    # First channel is 1's, second channel is 2's, etc.
    image = tf.constant(list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32,
                        shape=image_shape)
    boxes = tf.random_uniform((2, 4))
    box_ind = tf.constant([0, 0], dtype=tf.int32)

    # The result for both boxes should be [[1, 2], [3, 4], [5, 6]]
    # before averaging; global average pooling over these bins then gives
    # mean(1..6) = 3.5 for each box.
    expected_output = np.array([3.5, 3.5]).reshape([2, 1, 1, 1])

    for crop_size_mult in range(1, 3):
      crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
      ps_crop_and_pool = ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)

      with self.test_session() as sess:
        output = sess.run(ps_crop_and_pool)
        self.assertAllClose(output, expected_output) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 24, Source file: ops_test.py

Example 2: test_position_sensitive_with_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((6, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # When a single bin is used, position-sensitive crop and pool should be
    # the same as non-position sensitive crop and pool.
    crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)

    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)

    with self.test_session() as sess:
      expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
      self.assertAllClose(output, expected_output) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 22, Source file: ops_test.py

Example 3: test_position_sensitive_with_global_pool_false_and_single_bin

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [1, 1]

    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((6, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize),
    # the outputs are the same whatever the global_pool value is.
    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)
    ps_crop = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False)

    with self.test_session() as sess:
      pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop))
      self.assertAllClose(pooled_output, unpooled_output) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: ops_test.py

Example 4: test_position_sensitive

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive(self):
    num_spatial_bins = [3, 2]
    image_shape = [3, 2, 6]

    # First channel is 1's, second channel is 2's, etc.
    image = tf.constant(list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32,
                        shape=image_shape)
    boxes = tf.random_uniform((2, 4))

    # The result for both boxes should be [[1, 2], [3, 4], [5, 6]]
    # before averaging.
    expected_output = np.array([3.5, 3.5]).reshape([2, 1, 1, 1])

    for crop_size_mult in range(1, 3):
      crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
      ps_crop_and_pool = ops.position_sensitive_crop_regions(
          image, boxes, crop_size, num_spatial_bins, global_pool=True)

      with self.test_session() as sess:
        output = sess.run(ps_crop_and_pool)
        self.assertAllClose(output, expected_output) 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 23, Source file: ops_test.py

Example 5: test_position_sensitive_with_equal_channels

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_equal_channels(self):
    num_spatial_bins = [2, 2]
    image_shape = [1, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.constant(range(1, 3 * 3 + 1), dtype=tf.float32,
                        shape=[1, 3, 3, 1])
    tiled_image = tf.tile(image, [1, 1, 1, image_shape[3]])
    boxes = tf.random_uniform((3, 4))
    box_ind = tf.constant([0, 0, 0], dtype=tf.int32)

    # All channels are equal so position-sensitive crop and resize should
    # work as the usual crop and resize for just one channel.
    crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)

    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        tiled_image,
        boxes,
        box_ind,
        crop_size,
        num_spatial_bins,
        global_pool=True)

    with self.test_session() as sess:
      expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
      self.assertAllClose(output, expected_output) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 29, Source file: ops_test.py

Example 6: test_raise_value_error_on_num_bins_less_than_one

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_raise_value_error_on_num_bins_less_than_one(self):
    num_spatial_bins = [1, -1]
    image_shape = [1, 1, 1, 2]
    crop_size = [2, 2]

    image = tf.constant(1, dtype=tf.float32, shape=image_shape)
    boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    box_ind = tf.constant([0], dtype=tf.int32)

    with self.assertRaisesRegexp(ValueError, 'num_spatial_bins should be >= 1'):
      ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 14, Source file: ops_test.py

Example 7: test_raise_value_error_on_non_divisible_crop_size

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_raise_value_error_on_non_divisible_crop_size(self):
    num_spatial_bins = [2, 3]
    image_shape = [1, 1, 1, 6]
    crop_size = [3, 2]

    image = tf.constant(1, dtype=tf.float32, shape=image_shape)
    boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    box_ind = tf.constant([0], dtype=tf.int32)

    with self.assertRaisesRegexp(
        ValueError, 'crop_size should be divisible by num_spatial_bins'):
      ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 15, Source file: ops_test.py

Example 8: test_position_sensitive_with_global_pool_false

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false(self):
    num_spatial_bins = [3, 2]
    image_shape = [1, 3, 2, 6]
    num_boxes = 2

    # First channel is 1's, second channel is 2's, etc.
    image = tf.constant(list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32,
                        shape=image_shape)
    boxes = tf.random_uniform((num_boxes, 4))
    box_ind = tf.constant([0, 0], dtype=tf.int32)

    expected_output = []

    # Expected output, when crop_size = [3, 2].
    expected_output.append(np.expand_dims(
        np.tile(np.array([[1, 2],
                          [3, 4],
                          [5, 6]]), (num_boxes, 1, 1)),
        axis=-1))

    # Expected output, when crop_size = [6, 4].
    expected_output.append(np.expand_dims(
        np.tile(np.array([[1, 1, 2, 2],
                          [1, 1, 2, 2],
                          [3, 3, 4, 4],
                          [3, 3, 4, 4],
                          [5, 5, 6, 6],
                          [5, 5, 6, 6]]), (num_boxes, 1, 1)),
        axis=-1))

    for crop_size_mult in range(1, 3):
      crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
      ps_crop = ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False)
      with self.test_session() as sess:
        output = sess.run(ps_crop)

      self.assertAllEqual(output, expected_output[crop_size_mult - 1]) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 40, Source file: ops_test.py

Example 9: test_position_sensitive_with_global_pool_false_and_known_boxes

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_position_sensitive_with_global_pool_false_and_known_boxes(self):
    num_spatial_bins = [2, 2]
    image_shape = [2, 2, 2, 4]
    crop_size = [2, 2]

    image = tf.constant(list(range(1, 2 * 2 * 4 + 1)) * 2, dtype=tf.float32,
                        shape=image_shape)

    # First box contains whole image, and second box contains only first row.
    boxes = tf.constant(np.array([[0., 0., 1., 1.],
                                  [0., 0., 0.5, 1.]]), dtype=tf.float32)
    box_ind = tf.constant([0, 1], dtype=tf.int32)

    expected_output = []

    # Expected output, when the box containing whole image.
    expected_output.append(
        np.reshape(np.array([[4, 7],
                             [10, 13]]),
                   (1, 2, 2, 1))
    )

    # Expected output, when the box containing only first row.
    expected_output.append(
        np.reshape(np.array([[3, 6],
                             [7, 10]]),
                   (1, 2, 2, 1))
    )
    expected_output = np.concatenate(expected_output, axis=0)

    ps_crop = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False)

    with self.test_session() as sess:
      output = sess.run(ps_crop)
      self.assertAllEqual(output, expected_output) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 38, Source file: ops_test.py

Example 10: test_raise_value_error_on_non_square_block_size

# Required import: from object_detection.utils import ops [as alias]
# Or: from object_detection.utils.ops import position_sensitive_crop_regions [as alias]
def test_raise_value_error_on_non_square_block_size(self):
    num_spatial_bins = [3, 2]
    image_shape = [1, 3, 2, 6]
    crop_size = [6, 2]

    image = tf.constant(1, dtype=tf.float32, shape=image_shape)
    boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32)
    box_ind = tf.constant([0], dtype=tf.int32)

    with self.assertRaisesRegexp(
        ValueError, 'Only support square bin crop size for now.'):
      ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 15, Source file: ops_test.py


Note: The object_detection.utils.ops.position_sensitive_crop_regions examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright of the source code; for distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.