This page collects typical usage examples of the Python attribute detectron.core.config.cfg.PIXEL_MEANS. If you are wondering what cfg.PIXEL_MEANS is for, how to use it, or what it looks like in real code, the selected examples below may help. You can also read further about the parent module, detectron.core.config.cfg.
The following shows 3 code examples of cfg.PIXEL_MEANS, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
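Before the examples, it helps to know what the attribute holds. In Detectron's default configuration, PIXEL_MEANS is a (1, 1, 3) ndarray of per-channel BGR means that is subtracted from every input image during preprocessing. A minimal sketch, assuming the stock Detectron default values (a custom config may override them):

import numpy as np

# Default BGR pixel means from Detectron's config; shape (1, 1, 3) so the
# array broadcasts over an H x W x 3 image as loaded by cv2.imread.
PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])

im = np.random.randint(0, 256, (480, 640, 3)).astype(np.float32)  # dummy BGR image
im -= PIXEL_MEANS  # mean subtraction, the same step prep_im_for_blob performs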
Example 1: get_image_blob
# Required import: from detectron.core.config import cfg [as alias]
# Or: from detectron.core.config.cfg import PIXEL_MEANS [as alias]
def get_image_blob(im, target_scale, target_max_size):
    """Convert an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale (float): image scale (target size) / (original size)
        im_info (ndarray)
    """
    processed_im, im_scale = prep_im_for_blob(
        im, cfg.PIXEL_MEANS, target_scale, target_max_size
    )
    blob = im_list_to_blob(processed_im)
    # NOTE: this height and width may be larger than actual scaled input image
    # due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
    # maintaining this behavior for now to make existing results exactly
    # reproducible (in practice using the true input image height and width
    # yields nearly the same results, but they are sometimes slightly different
    # because predictions near the edge of the image will be pruned more
    # aggressively).
    height, width = blob.shape[2], blob.shape[3]
    im_info = np.hstack((height, width, im_scale))[np.newaxis, :]
    return blob, im_scale, im_info.astype(np.float32)
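For readers without the Detectron sources at hand, the sketch below shows roughly what prep_im_for_blob does with cfg.PIXEL_MEANS: mean subtraction followed by a resize that targets the short side while capping the long side. It is a simplified, hypothetical re-implementation (the real helper in detectron/utils/blob.py differs in details such as its return types), not the library function itself:

import cv2
import numpy as np

def prep_im_for_blob_sketch(im, pixel_means, target_size, max_size):
    # Subtract the per-channel BGR means -- this is where cfg.PIXEL_MEANS
    # is consumed.
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    # Scale so the short side reaches target_size, but never let the long
    # side exceed max_size.
    im_size_min = np.min(im.shape[0:2])
    im_size_max = np.max(im.shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(
        im, None, None, fx=im_scale, fy=im_scale,
        interpolation=cv2.INTER_LINEAR
    )
    return im, im_scale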
Example 2: _get_image_blob
# Required import: from detectron.core.config import cfg [as alias]
# Or: from detectron.core.config.cfg import PIXEL_MEANS [as alias]
def _get_image_blob(roidb):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    num_images = len(roidb)
    # Sample random scales to use for each image in this batch
    scale_inds = np.random.randint(
        0, high=len(cfg.TRAIN.SCALES), size=num_images
    )
    processed_ims = []
    im_scales = []
    for i in range(num_images):
        im = cv2.imread(roidb[i]['image'])
        assert im is not None, \
            'Failed to read image \'{}\''.format(roidb[i]['image'])
        if roidb[i]['flipped']:
            im = im[:, ::-1, :]
        target_size = cfg.TRAIN.SCALES[scale_inds[i]]
        im, im_scale = blob_utils.prep_im_for_blob(
            im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE
        )
        im_scales.append(im_scale)
        processed_ims.append(im)

    # Create a blob to hold the input images
    blob = blob_utils.im_list_to_blob(processed_ims)

    return blob, im_scales
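The mean-subtracted images are finally stacked by blob_utils.im_list_to_blob into a single NCHW blob, zero-padding every image to the largest height and width in the batch (and, in Detectron, additionally to a multiple of FPN.COARSEST_STRIDE, which is what the NOTE in Example 1 refers to). Below is a simplified sketch of that padding and stacking, not the actual library code:

import numpy as np

def im_list_to_blob_sketch(ims, coarsest_stride=0):
    # Pad every HWC image with zeros up to the max height/width in the list,
    # optionally rounded up to a multiple of coarsest_stride, then stack
    # and transpose to NCHW.
    max_shape = np.array([im.shape for im in ims]).max(axis=0)
    if coarsest_stride > 0:
        max_shape[0] = int(np.ceil(max_shape[0] / coarsest_stride) * coarsest_stride)
        max_shape[1] = int(np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)
    blob = np.zeros((len(ims), max_shape[0], max_shape[1], 3), dtype=np.float32)
    for i, im in enumerate(ims):
        blob[i, :im.shape[0], :im.shape[1], :] = im
    return blob.transpose((0, 3, 1, 2))  # NHWC -> NCHW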
Example 3: run_model_pb
# Required import: from detectron.core.config import cfg [as alias]
# Or: from detectron.core.config.cfg import PIXEL_MEANS [as alias]
def run_model_pb(args, net, init_net, im, check_blobs):
    workspace.ResetWorkspace()
    workspace.RunNetOnce(init_net)
    mutils.create_input_blobs_for_net(net.Proto())
    workspace.CreateNet(net)

    # input_blobs, _ = core_test._get_blobs(im, None)
    input_blobs = _prepare_blobs(
        im,
        cfg.PIXEL_MEANS,
        cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
    )
    gpu_blobs = []
    if args.device == 'gpu':
        gpu_blobs = ['data']
    for k, v in input_blobs.items():
        workspace.FeedBlob(
            core.ScopedName(k),
            v,
            mutils.get_device_option_cuda() if k in gpu_blobs else
            mutils.get_device_option_cpu()
        )

    try:
        workspace.RunNet(net)
        scores = workspace.FetchBlob('score_nms')
        classids = workspace.FetchBlob('class_nms')
        boxes = workspace.FetchBlob('bbox_nms')
    except Exception as e:
        print('Running pb model failed.\n{}'.format(e))
        # may not detect anything at all
        R = 0
        scores = np.zeros((R,), dtype=np.float32)
        boxes = np.zeros((R, 4), dtype=np.float32)
        classids = np.zeros((R,), dtype=np.float32)

    boxes = np.column_stack((boxes, scores))

    # sort the results based on score for comparison
    boxes, _, _, classids = _sort_results(
        boxes, None, None, classids)

    # write final result back to workspace
    workspace.FeedBlob('result_boxes', boxes)
    workspace.FeedBlob('result_classids', classids)

    ret = _get_result_blobs(check_blobs)

    return ret
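For completeness, a hypothetical driver for this example: net and init_net are Caffe2 NetDef protobufs loaded from an exported model, and im is a BGR image read with OpenCV. The file names and the args object here are illustrative placeholders, not part of the original script:

import cv2
from caffe2.proto import caffe2_pb2
from caffe2.python import core

def load_netdef(path):
    # Parse a serialized Caffe2 NetDef (e.g. produced by a pkl-to-pb export)
    net_def = caffe2_pb2.NetDef()
    with open(path, 'rb') as f:
        net_def.ParseFromString(f.read())
    return core.Net(net_def)

# init_net = load_netdef('model_init.pb')   # hypothetical paths
# net = load_netdef('model.pb')
# im = cv2.imread('demo.jpg')
# results = run_model_pb(args, net, init_net, im, check_blobs=['result_boxes'])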