This article collects typical usage examples of cfg from the Python module core.config. If you are unsure what config.cfg is for or how to use it, the curated code samples below may help; you can also explore the rest of the core.config module for further usage.
14 code examples of config.cfg are shown below, sorted by popularity by default.
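Before the examples, here is a minimal sketch of the usual access pattern, assuming a Detectron-style core/config.py in which cfg is a global AttrDict populated from a YAML file (helper names may differ slightly between forks, and the config path below is only illustrative):
from core.config import cfg, merge_cfg_from_file

merge_cfg_from_file('configs/my_experiment.yaml')  # hypothetical YAML path
# Options are then read anywhere in the code base via attribute access:
num_fpn_levels = cfg.FPN.RPN_MAX_LEVEL - cfg.FPN.RPN_MIN_LEVEL + 1
print(cfg.MODEL.TYPE, num_fpn_levels)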
Example 1: collect
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def collect(inputs, is_training):
    cfg_key = 'TRAIN' if is_training else 'TEST'
    post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)
    k_max = cfg.FPN.RPN_MAX_LEVEL
    k_min = cfg.FPN.RPN_MIN_LEVEL
    num_lvls = k_max - k_min + 1
    roi_inputs = inputs[:num_lvls]
    score_inputs = inputs[num_lvls:]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
    # Combine predictions across all levels and retain the top scoring
    rois = np.concatenate(roi_inputs)
    scores = np.concatenate(score_inputs).squeeze()
    inds = np.argsort(-scores)[:post_nms_topN]
    rois = rois[inds, :]
    return rois
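A hypothetical call to make the expected input layout concrete; this is only a sketch and assumes cfg has already been loaded with FPN enabled (with the usual defaults RPN_MIN_LEVEL=2 and RPN_MAX_LEVEL=6 there are five levels, and the per-level roi arrays come first, followed by the per-level score arrays):
import numpy as np

num_lvls = cfg.FPN.RPN_MAX_LEVEL - cfg.FPN.RPN_MIN_LEVEL + 1
roi_inputs = [np.random.rand(100, 5).astype(np.float32) for _ in range(num_lvls)]    # [batch_idx, x0, y0, x1, y1]
score_inputs = [np.random.rand(100, 1).astype(np.float32) for _ in range(num_lvls)]  # objectness scores
top_rois = collect(roi_inputs + score_inputs, is_training=False)  # keeps the top-scoring rois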
Example 2: parse_args
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def parse_args():
    """Parse in command line arguments"""
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument(
        '--dataset',
        help='training dataset')
    parser.add_argument(
        '--cfg', dest='cfg_file', required=True,
        help='optional config file')
    parser.add_argument(
        '--result_path',
        help='the path for result file.')
    parser.add_argument(
        '--output_dir',
        help='output directory to save the testing results.')
    parser.add_argument(
        '--set', dest='set_cfgs',
        help='set config keys, will overwrite config in the cfg_file.'
             ' See lib/core/config.py for all options',
        default=[], nargs='*')
    return parser.parse_args()
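The example above only parses the arguments; a hedged sketch of how they are typically applied to the global cfg follows (merge_cfg_from_file, merge_cfg_from_list and assert_and_infer_cfg are the Detectron-style helpers in core.config; adjust the names if your fork differs):
from core.config import merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg

args = parse_args()
if args.cfg_file is not None:
    merge_cfg_from_file(args.cfg_file)  # load the YAML config
if args.set_cfgs:
    merge_cfg_from_list(args.set_cfgs)  # e.g. --set TEST.SCORE_THRESH 0.7
assert_and_infer_cfg()                  # sanity-check and finalize the config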
Example 3: __init__
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def __init__(self):
    super().__init__()
    cfg = [[64, 64, 'M'], [128, 128, 'M'], [256, 256, 256, 'M'], [512, 512, 512, 'M'], [512, 512, 512]]  # Prune the conv5 max pool
    dim_in = 3
    for i in range(len(cfg)):
        for j in range(len(cfg[i])):
            if cfg[i][j] == 'M':
                setattr(self, 'pool%d' % (i + 1), nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                setattr(self, 'conv%d_%d' % (i + 1, j + 1), nn.Conv2d(dim_in, cfg[i][j], kernel_size=3, padding=1))
                setattr(self, 'relu%d_%d' % (i + 1, j + 1), nn.ReLU(inplace=True))
                dim_in = cfg[i][j]
    self.spatial_scale = 1. / 16.
    self.dim_out = dim_in
    self._init_modules()
Example 4: get_roidb
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def get_roidb(dataset_name, ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(gt=cfg.TEST.USE_GT_PROPOSALS)
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, start, end, total_num_images
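A hypothetical invocation, for illustration only; the dataset name is just an example of the Detectron naming convention, and TEST.USE_GT_PROPOSALS is a key specific to this fork's config rather than stock Detectron:
roidb, start, end, total_num_images = get_roidb('coco_2014_minival', ind_range=None)
# or restrict testing to a shard of the images, e.g. when evaluating in parallel:
roidb, start, end, total_num_images = get_roidb('coco_2014_minival', ind_range=(0, 500))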
Example 5: collect
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def collect(inputs, is_training):
    cfg_key = 'TRAIN' if is_training else 'TEST'
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    k_max = cfg.FPN.RPN_MAX_LEVEL
    k_min = cfg.FPN.RPN_MIN_LEVEL
    num_lvls = k_max - k_min + 1
    roi_inputs = inputs[:num_lvls]
    score_inputs = inputs[num_lvls:]
    if is_training:
        score_inputs = score_inputs[:-2]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
    # Combine predictions across all levels and retain the top scoring
    rois = np.concatenate([blob.data for blob in roi_inputs])
    scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
    inds = np.argsort(-scores)[:post_nms_topN]
    rois = rois[inds, :]
    return rois
Example 6: distribute
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def distribute(rois, label_blobs, outputs, train):
    """To understand the output blob order see return value of
    roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
    """
    lvl_min = cfg.FPN.ROI_MIN_LEVEL
    lvl_max = cfg.FPN.ROI_MAX_LEVEL
    lvls = fpn.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
    outputs[0].reshape(rois.shape)
    outputs[0].data[...] = rois
    # Create new roi blobs for each FPN level
    # (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
    # to generalize to support this particular case.)
    rois_idx_order = np.empty((0, ))
    for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
        idx_lvl = np.where(lvls == lvl)[0]
        blob_roi_level = rois[idx_lvl, :]
        outputs[output_idx + 1].reshape(blob_roi_level.shape)
        outputs[output_idx + 1].data[...] = blob_roi_level
        rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
    rois_idx_restore = np.argsort(rois_idx_order)
    blob_utils.py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])
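The level assignment performed by fpn.map_rois_to_fpn_levels follows the FPN paper heuristic k = floor(k0 + log2(sqrt(w*h) / 224)). A self-contained numpy sketch of that mapping is shown below; the canonical scale 224 and canonical level 4 match the usual Detectron defaults (cfg.FPN.ROI_CANONICAL_SCALE / cfg.FPN.ROI_CANONICAL_LEVEL), but check your fork:
import numpy as np

def map_rois_to_fpn_levels_sketch(boxes, k_min, k_max, s0=224, lvl0=4):
    # boxes: [[x0, y0, x1, y1], ...] without the leading batch index column
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    s = np.sqrt(areas)
    # FPN paper, Eqn. 1: k = floor(k0 + log2(sqrt(wh) / 224))
    target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
    return np.clip(target_lvls, k_min, k_max)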
Example 7: do_reval
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def do_reval(dataset_name, output_dir, args):
    dataset = JsonDataset(dataset_name)
    with open(os.path.join(output_dir, 'detections.pkl'), 'rb') as f:
        dets = pickle.load(f)
    # Override config with the one saved in the detections file
    if args.cfg_file is not None:
        core.config.merge_cfg_from_cfg(yaml.load(dets['cfg']))
    else:
        core.config._merge_a_into_b(yaml.load(dets['cfg']), cfg)
    results = task_evaluation.evaluate_all(
        dataset,
        dets['all_boxes'],
        dets['all_segms'],
        dets['all_keyps'],
        output_dir,
        use_matlab=args.matlab_eval
    )
    task_evaluation.log_copy_paste_friendly_results(results)
Example 8: initialize_model_from_cfg
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def initialize_model_from_cfg():
    """Initialize a model from the global cfg. Loads test-time weights and
    creates the networks in the Caffe2 workspace.
    """
    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    net_utils.initialize_from_weights_file(
        model, cfg.TEST.WEIGHTS, broadcast=False
    )
    model_builder.add_inference_inputs(model)
    workspace.CreateNet(model.net)
    workspace.CreateNet(model.conv_body_net)
    if cfg.MODEL.MASK_ON:
        workspace.CreateNet(model.mask_net)
    if cfg.MODEL.KEYPOINTS_ON:
        workspace.CreateNet(model.keypoint_net)
    return model
Example 9: get_roidb_and_dataset
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def get_roidb_and_dataset(ind_range):
    """Get the roidb for the dataset specified in the global cfg. Optionally
    restrict it to a range of indices if ind_range is a pair of integers.
    """
    dataset = TextDataSet(cfg.TEST.DATASET)
    if cfg.MODEL.FASTER_RCNN:
        roidb = dataset.get_roidb()
    else:
        roidb = dataset.get_roidb(
            proposal_file=cfg.TEST.PROPOSAL_FILE,
            proposal_limit=cfg.TEST.PROPOSAL_LIMIT
        )
    if ind_range is not None:
        total_num_images = len(roidb)
        start, end = ind_range
        roidb = roidb[start:end]
    else:
        start = 0
        end = len(roidb)
        total_num_images = end
    return roidb, dataset, start, end, total_num_images
Example 10: initialize_model_from_cfg
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def initialize_model_from_cfg(args, gpu_id=0):
    """Initialize a model from the global cfg. Loads test-time weights and
    set to evaluation mode.
    """
    model = model_builder_rel.Generalized_RCNN()
    model.eval()
    if args.cuda:
        model.cuda()
    if args.load_ckpt:
        load_name = args.load_ckpt
        logger.info("loading checkpoint %s", load_name)
        checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
        net_utils.load_ckpt(model, checkpoint['model'])
    if args.load_detectron:
        logger.info("loading detectron weights %s", args.load_detectron)
        load_detectron_weight(model, args.load_detectron)
    model = mynn.DataParallel(model, cpu_keywords=['im_info', 'roidb'], minibatch=True)
    return model
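A hypothetical call, assuming cfg has already been populated (e.g. via merge_cfg_from_file) and that a trained checkpoint exists; argparse.Namespace simply stands in for the result of a parse_args() like Example 14, and the checkpoint path is made up:
import argparse

args = argparse.Namespace(
    cuda=True,                                    # move the model to GPU
    load_ckpt='Outputs/ckpt/model_step9999.pth',  # hypothetical checkpoint path
    load_detectron=None)                          # or a Detectron .pkl weight file instead
model = initialize_model_from_cfg(args)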
Example 11: collect
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def collect(inputs, is_training):
    cfg_key = 'TRAIN' if is_training else 'TEST'
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    k_max = cfg.FPN.RPN_MAX_LEVEL
    k_min = cfg.FPN.RPN_MIN_LEVEL
    num_lvls = k_max - k_min + 1
    roi_inputs = inputs[:num_lvls]
    score_inputs = inputs[num_lvls:]
    if is_training:
        score_inputs = score_inputs[:-2]
    # rois are in (for each time frame ti)
    # [[batch_idx, x0t0, y0t0, x1t0, y1t0, x0t1, y0t1, x1t1, y1t1], ...] format
    # Combine predictions across all levels and retain the top scoring
    rois = np.concatenate([blob.data for blob in roi_inputs])
    scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
    inds = np.argsort(-scores)[:post_nms_topN]
    rois = rois[inds, :]
    return rois
Example 12: distribute
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def distribute(rois, label_blobs, outputs, train):
    """To understand the output blob order see return value of
    roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
    """
    lvl_min = cfg.FPN.ROI_MIN_LEVEL
    lvl_max = cfg.FPN.ROI_MAX_LEVEL
    lvls = fpn.map_rois_to_fpn_levels(rois[:, 1:], lvl_min, lvl_max)
    outputs[0].reshape(rois.shape)
    outputs[0].data[...] = rois
    # Create new roi blobs for each FPN level
    # (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
    # to generalize to support this particular case.)
    rois_idx_order = np.empty((0, ))
    for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
        idx_lvl = np.where(lvls == lvl)[0]
        blob_roi_level = rois[idx_lvl, :]
        outputs[output_idx + 1].reshape(blob_roi_level.shape)
        outputs[output_idx + 1].data[...] = blob_roi_level
        rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
    rois_idx_restore = np.argsort(rois_idx_order)
    blob_utils.py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])
Example 13: distribute
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def distribute(rois, label_blobs):
    """To understand the output blob order see return value of
    roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
    """
    lvl_min = cfg.FPN.ROI_MIN_LEVEL
    lvl_max = cfg.FPN.ROI_MAX_LEVEL
    lvls = fpn_utils.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
    # Delete roi entries that have negative area
    # idx_neg = np.where(lvls == -1)[0]
    # rois = np.delete(rois, idx_neg, axis=0)
    # lvls = np.delete(lvls, idx_neg, axis=0)
    output_blob_names = roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
    outputs = [None] * len(output_blob_names)
    outputs[0] = rois
    # Create new roi blobs for each FPN level
    # (See: utils.fpn.add_multilevel_roi_blobs which is similar but annoying
    # to generalize to support this particular case.)
    rois_idx_order = np.empty((0, ))
    for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
        idx_lvl = np.where(lvls == lvl)[0]
        blob_roi_level = rois[idx_lvl, :]
        outputs[output_idx + 1] = blob_roi_level
        rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
    rois_idx_restore = np.argsort(rois_idx_order)
    outputs[-1] = rois_idx_restore.astype(np.int32)
    return dict(zip(output_blob_names, outputs))
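A hedged sketch of consuming the returned dictionary; the blob names assume Detectron's get_fast_rcnn_blob_names convention ('rois', 'rois_fpn<level>' and 'rois_idx_restore_int32', with the usual ROI_MIN_LEVEL=2 / ROI_MAX_LEVEL=5 defaults), so verify them against your fork:
# rois: float32 array of shape (N, 5) laid out as [batch_idx, x0, y0, x1, y1]
blobs = distribute(rois, label_blobs=None)
for lvl in range(cfg.FPN.ROI_MIN_LEVEL, cfg.FPN.ROI_MAX_LEVEL + 1):
    rois_lvl = blobs['rois_fpn{}'.format(lvl)]
    # ...feed rois_lvl to the RoIAlign/RoIPool head for this pyramid level
restore_inds = blobs['rois_idx_restore_int32']  # undoes the per-level regrouping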
Example 14: parse_args
# Required import: from core import config [as alias]
# Or: from core.config import cfg [as alias]
def parse_args():
    """Parse in command line arguments"""
    parser = argparse.ArgumentParser(description='Demonstrate mask-rcnn results')
    parser.add_argument(
        '--dataset', required=True,
        help='training dataset')
    parser.add_argument(
        '--cfg', dest='cfg_file', required=True,
        help='optional config file')
    parser.add_argument(
        '--set', dest='set_cfgs',
        help='set config keys, will overwrite config in the cfg_file',
        default=[], nargs='+')
    parser.add_argument(
        '--no_cuda', dest='cuda', help='whether use CUDA', action='store_false')
    parser.add_argument('--load_ckpt', help='path of checkpoint to load')
    parser.add_argument(
        '--load_detectron', help='path to the detectron weight pickle file')
    parser.add_argument(
        '--image_dir',
        help='directory to load images for demo')
    parser.add_argument(
        '--images', nargs='+',
        help='images to infer. Must not use with --image_dir')
    parser.add_argument(
        '--output_dir',
        help='directory to save demo results',
        default="infer_outputs")
    parser.add_argument(
        '--merge_pdfs', type=distutils.util.strtobool, default=True)
    args = parser.parse_args()
    return args