Python cfg.PIXEL_MEANS Attribute Code Examples

This article collects typical usage examples of the Python attribute model.utils.config.cfg.PIXEL_MEANS. If you are wondering how cfg.PIXEL_MEANS is used in practice, the selected examples below should help. You can also explore further usage examples of the containing module, model.utils.config.cfg.


Fifteen code examples of the cfg.PIXEL_MEANS attribute are shown below, sorted by popularity by default.
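In the Faster R-CNN derivatives these examples come from, cfg.PIXEL_MEANS is typically a 1×1×3 array of per-channel (BGR) pixel means that is subtracted from every image before it enters the network. The exact values are defined in each repository's config file; the array below uses the Caffe-style BGR means common in Faster R-CNN codebases, shown here only as an illustrative assumption.

import numpy as np

# Assumed values: the Caffe-style BGR channel means often found in Faster R-CNN
# configs; the real values come from model.utils.config.cfg in each repository.
PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]], dtype=np.float32)

# Mean subtraction as the examples below apply it: the 1x1x3 means broadcast
# over an HxWx3 float image.
im = np.random.randint(0, 256, size=(480, 640, 3)).astype(np.float32)
im -= PIXEL_MEANS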

Example 1: _plot_image

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _plot_image(self, data, gt_boxes, num_boxes):
      import matplotlib.pyplot as plt
      X = data.cpu().numpy().copy()
      X += cfg.PIXEL_MEANS  # undo the mean subtraction applied during preprocessing
      X = X.astype(np.uint8)
      X = X.squeeze(0)
      boxes = gt_boxes.squeeze(0)[:num_boxes.view(-1)[0], :].cpu().numpy().copy()

      fig, ax = plt.subplots(figsize=(8,8))
      ax.imshow(X[:,:,::-1], aspect='equal')
      for i in range(boxes.shape[0]):
          bbox = boxes[i, :4]
          ax.add_patch(
                  plt.Rectangle((bbox[0], bbox[1]),
                                 bbox[2]-bbox[0],
                                 bbox[3]-bbox[1], fill=False, linewidth=2.0)
                  )
      #plt.imshow(X[:,:,::-1])
      plt.tight_layout()
      plt.show() 
Author: Feynman27 | Project: pytorch-detect-to-track | Lines: 22 | Source: roibatchLoader.py

Example 2: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
    num_images = len(roidb)
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['file_path'])

        if roidb[i]['flipped']:
            im = im[:, ::-1, :]

        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    blob = im_list_to_blob(processed_ims)
    return blob, im_scales 
Author: dechunwang | Project: SSH-pytorch | Lines: 20 | Source: minibatch.py
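im_list_to_blob is imported from each repository's blob utilities and is not shown on this page. As a reference, here is a minimal sketch of the implementation commonly used in Faster R-CNN codebases (an assumption, not necessarily the exact code in SSH-pytorch): every HxWx3 image is zero-padded to the largest height and width in the list, and the results are stacked into a single NxHxWx3 float32 blob.

import numpy as np

def im_list_to_blob(ims):
    """Minimal sketch of the assumed standard behaviour: zero-pad each HxWx3
    image to the largest H and W in the list, then stack into an NxHxWx3 blob."""
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    blob = np.zeros((len(ims), max_shape[0], max_shape[1], 3), dtype=np.float32)
    for i, im in enumerate(ims):
        blob[i, :im.shape[0], :im.shape[1], :] = im
    return blob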

Example 3: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(im, im_scales):
    """
    :param im: input image
    :param im_scales: a list of scale coefficients
    :return: A list of network blobs, each containing a resized version of the image
    """
    # Subtract the mean
    im_copy = im.astype(np.float32, copy=True) - cfg.PIXEL_MEANS

    # Append all scales to form a blob
    blobs = []
    for scale in im_scales:
        if scale == 1.0:
            blobs.append({'data': im_list_to_blob([im_copy])})
        else:
            blobs.append({'data': im_list_to_blob([cv2.resize(im_copy, None, None, fx=scale, fy=scale,
                                                              interpolation=cv2.INTER_LINEAR)])})
    return blobs 
Author: dechunwang | Project: SSH-pytorch | Lines: 20 | Source: test_utils.py

Example 4: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
      im (ndarray): a color image in BGR order
    Returns:
      blob (ndarray): a data blob holding an image pyramid
      im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)  # RGB
    im_orig /= 255.0
    im_orig -= cfg.PIXEL_MEANS
    im_orig /= cfg.PIXEL_STDS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
Author: guoruoqian | Project: cascade-rcnn_Pytorch | Lines: 37 | Source: demo.py
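The scale selection inside the pyramid loop above is worth making concrete: for each target_size in cfg.TEST.SCALES, the scale is chosen so that the shorter image side reaches target_size, and it is then capped so the longer side never exceeds cfg.TEST.MAX_SIZE. A small numeric sketch, assuming SCALES = (600,) and MAX_SIZE = 1000 (typical defaults; the real values live in the config):

import numpy as np

SCALES, MAX_SIZE = (600,), 1000          # assumed defaults for illustration

h, w = 375, 1242                         # an illustrative wide image
im_size_min, im_size_max = min(h, w), max(h, w)

for target_size in SCALES:
    im_scale = float(target_size) / float(im_size_min)    # 600 / 375 = 1.6
    if np.round(im_scale * im_size_max) > MAX_SIZE:        # 1.6 * 1242 = 1987 > 1000, so cap
        im_scale = float(MAX_SIZE) / float(im_size_max)    # 1000 / 1242 ~= 0.805
    print(target_size, im_scale)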

Example 5: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)

  processed_ims = []
  im_scales = []
  for i in range(num_images):
    #im = cv2.imread(roidb[i]['image'])
    im = imread(roidb[i]['image'])

    if len(im.shape) == 2:
      im = im[:,:,np.newaxis]
      im = np.concatenate((im,im,im), axis=2)
    # flip the channels (rgb -> bgr), since the original pipeline used cv2
    # im = im[:,:,::-1]

    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.PIXEL_STDS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Author: guoruoqian | Project: cascade-rcnn_Pytorch | Lines: 33 | Source: minibatch.py

Example 6: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(im):
  """Converts an image into a network input.
  Arguments:
    im (ndarray): a color image in BGR order
  Returns:
    blob (ndarray): a data blob holding an image pyramid
    im_scale_factors (list): list of image scales (relative to im) used
      in the image pyramid
  """
  im_orig = im.astype(np.float32, copy=True)
  im_orig -= cfg.PIXEL_MEANS

  im_shape = im_orig.shape
  im_size_min = np.min(im_shape[0:2])
  im_size_max = np.max(im_shape[0:2])

  processed_ims = []
  im_scale_factors = []

  for target_size in cfg.TEST.SCALES:
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
      im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
    im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
            interpolation=cv2.INTER_LINEAR)
    im_scale_factors.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, np.array(im_scale_factors) 
Author: Feynman27 | Project: pytorch-detect-to-track | Lines: 35 | Source: demo.py

Example 7: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)

  processed_ims = []
  im_scales = []
  for i in range(num_images):
    im = cv2.imread(roidb[i]['image'])
    #im = imread(roidb[i]['image'])

    if len(im.shape) == 2:
      im = im[:,:,np.newaxis]
      im = np.concatenate((im,im,im), axis=2)
    # flip the channels (rgb -> bgr), since the original pipeline used cv2
    #im = im[:,:,::-1]

    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Author: Feynman27 | Project: pytorch-detect-to-track | Lines: 33 | Source: minibatch.py
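prep_im_for_blob also comes from the repository's blob utilities and is not reproduced on this page. Below is a minimal sketch of the standard four-argument version (an assumption based on the common Faster R-CNN implementation): it subtracts the pixel means, then rescales the image so the shorter side matches target_size without the longer side exceeding max_size, returning the processed image together with its scale factor. Example 5 above calls a variant that additionally takes per-channel standard deviations.

import cv2
import numpy as np

def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Minimal sketch of the assumed standard behaviour: mean-subtract and
    rescale an image for blob construction."""
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the longer side from exceeding max_size
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale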

Example 8: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(self, im, frame_id):
        '''Convert an image into a network input.
        :param im: a color image (BGR order) as an ndarray
        :param frame_id: frame number within the given video
        :return: a dict containing the image blob, im_info, and frame number
        '''
        im_orig = im.astype(np.float32, copy=True)
        im_orig -= cfg.PIXEL_MEANS

        im_shape = im_orig.shape
        im_size_min = np.min(im_shape[0:2])
        im_size_max = np.max(im_shape[0:2])

        processed_ims = []
        im_scale_factors = []

        for target_size in cfg.TEST.SCALES:
            im_scale = float(target_size) / float(im_size_min)
            # Prevent the biggest axis from being more than MAX_SIZE
            if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
                im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
            im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,interpolation=cv2.INTER_LINEAR)
            im_scale_factors.append(im_scale)
            processed_ims.append(im)

        blob = im_list_to_blob(processed_ims)
        scales = np.array(im_scale_factors)

        blobs = {'data': blob}
        blobs['im_info'] = np.array(
                [[blob.shape[1], blob.shape[2], scales[0]]], dtype=np.float32)
        blobs['frame_number'] = np.array([[frame_id]])

        return blobs 
Author: Feynman27 | Project: pytorch-detect-to-track | Lines: 36 | Source: tracking_utils.py
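The im_info entry packs the resized blob's height, width, and the first scale factor into one row; downstream components such as the RPN typically use it to clip proposals to the image and to map detections back to the original resolution. Below is a small sketch of the metadata this method would return for a 540×960 frame, assuming SCALES = (600,) and MAX_SIZE = 1000 (the 'data' blob itself is omitted here):

import numpy as np

SCALES, MAX_SIZE = (600,), 1000                # assumed defaults for illustration

h, w, frame_id = 540, 960, 17                  # illustrative frame size and index
im_scale = float(SCALES[0]) / float(min(h, w))
if np.round(im_scale * max(h, w)) > MAX_SIZE:  # 600/540 would push 960 past 1000
    im_scale = float(MAX_SIZE) / float(max(h, w))

blobs = {
    # blob.shape[1] and blob.shape[2] in the method above are the resized
    # height and width; round(h * im_scale) / round(w * im_scale) approximate them.
    'im_info': np.array([[round(h * im_scale), round(w * im_scale), im_scale]],
                        dtype=np.float32),
    'frame_number': np.array([[frame_id]]),
}
print(blobs['im_info'])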

Example 9: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
      im (ndarray): a color image in BGR order
    Returns:
      blob (ndarray): a data blob holding an image pyramid
      im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
Author: ucbdrive | Project: 3d-vehicle-tracking | Lines: 35 | Source: demo.py

Example 10: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)

    processed_ims = []
    im_scales = []
    for i in range(num_images):
        # im = cv2.imread(roidb[i]['image'])
        im = imread(roidb[i]['image'])

        if len(im.shape) == 2:
            im = im[:, :, np.newaxis]
            im = np.concatenate((im, im, im), axis=2)
        # flip the channels (rgb -> bgr), since the original pipeline used cv2
        im = im[:, :, ::-1]

        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                                        cfg.TRAIN.MAX_SIZE)
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, im_scales 
Author: ucbdrive | Project: 3d-vehicle-tracking | Lines: 33 | Source: minibatch.py

Example 11: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)

  processed_ims = []
  im_scales = []
  for i in range(num_images):
    #im = cv2.imread(roidb[i]['image'])
    im = imread(roidb[i]['image'])

    if len(im.shape) == 2:
      im = im[:,:,np.newaxis]
      im = np.concatenate((im,im,im), axis=2)
    # flip the channels (rgb -> bgr), since the original pipeline used cv2
    im = im[:,:,::-1]

    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Author: twangnh | Project: Distilling-Object-Detectors | Lines: 33 | Source: minibatch.py

Example 12: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, target_size):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)

  processed_ims = []
  im_scales = []
  for i in range(num_images):
    #im = cv2.imread(roidb[i]['image'])
    im = imread(roidb[i]['image'])

    if len(im.shape) == 2:
      im = im[:,:,np.newaxis]
      im = np.concatenate((im,im,im), axis=2)
    # flip the channels (rgb -> bgr), since the original pipeline used cv2
    im = im[:,:,::-1]

    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size[i],
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Author: princewang1994 | Project: RFCN_CoupleNet.pytorch | Lines: 32 | Source: minibatch.py

Example 13: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb, scale_inds):
  """Builds an input blob from the images in the roidb at the specified
  scales.
  """
  num_images = len(roidb)

  processed_ims = []
  im_scales = []
  for i in range(num_images):
    #im = cv2.imread(roidb[i]['image'])
    im = imread(roidb[i]['image'])

    if len(im.shape) == 2:
      im = im[:,:,np.newaxis]
      im = np.concatenate((im,im,im), axis=2)
    # flip the channels (rgb -> bgr), since the original pipeline used cv2
    # im = im[:,:,::-1]

    if roidb[i]['flipped']:
      im = im[:, ::-1, :]
    target_size = cfg.TRAIN.SCALES[scale_inds[i]]
    im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
                    cfg.TRAIN.MAX_SIZE)
    im_scales.append(im_scale)
    processed_ims.append(im)

  # Create a blob to hold the input images
  blob = im_list_to_blob(processed_ims)

  return blob, im_scales 
Author: timy90022 | Project: One-Shot-Object-Detection | Lines: 33 | Source: minibatch.py

Example 14: get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
        im (ndarray): a color image
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS

    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])

    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)

    return blob, np.array(im_scale_factors) 
Author: violetteshev | Project: bottom-up-features | Lines: 35 | Source: utils.py

Example 15: _get_image_blob

# Required import: from model.utils.config import cfg [as alias]
# Or alternatively: from model.utils.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
        im_shapes: the list of image shapes
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []

    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im_list_to_blob([im]))

    blob = processed_ims
    return blob, np.array(im_scale_factors) 
开发者ID:jd730,项目名称:OICR-pytorch,代码行数:28,代码来源:test_oicr.py


Note: The model.utils.config.cfg.PIXEL_MEANS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before distributing or reusing the code, and do not reproduce this article without permission.