This article collects typical usage examples of the Python method utils.load_model.load_param. If you are wondering how exactly to use load_model.load_param, what it does, or what calling it looks like in practice, the curated method examples below should help. You can also explore further usage examples of the module it belongs to, utils.load_model.
The following presents 10 code examples of the load_model.load_param method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
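Every snippet below follows the same pattern: load_param takes a checkpoint prefix and an epoch number and returns the trained argument and auxiliary parameter dictionaries, which are then handed to a Predictor. Here is a minimal sketch of that pattern; the prefix path and epoch are placeholders rather than values from any particular example, and the comment about process=True is an assumption based on how the examples use it.

from utils.load_model import load_param

# Hypothetical checkpoint prefix and epoch; point these at your own trained model.
prefix = './output/rfcn/model'
epoch = 10

# process=True mirrors the test-time examples below; it presumably applies the
# repo's post-processing to the raw checkpoint before inference.
arg_params, aux_params = load_param(prefix, epoch, process=True)

# Both return values are dicts mapping parameter names to NDArrays,
# ready to pass to a Predictor via arg_params=... and aux_params=...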
Example 1: test_rcnn
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path,
              ctx, prefix, epoch,
              vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None):
    if not logger:
        assert False, 'require a logger'
    # print cfg
    pprint.pprint(cfg)
    logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))
    # load symbol and testing data
    key_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    cur_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    key_sym = key_sym_instance.get_key_test_symbol(cfg)
    cur_sym = cur_sym_instance.get_cur_test_symbol(cfg)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
    roidb = imdb.gt_roidb()
    # get test data iter
    # split roidbs
    gpu_num = len(ctx)
    roidbs = [[] for x in range(gpu_num)]
    roidbs_seg_lens = np.zeros(gpu_num, dtype=np.int)
    for x in roidb:
        gpu_id = np.argmin(roidbs_seg_lens)
        roidbs[gpu_id].append(x)
        roidbs_seg_lens[gpu_id] += x['frame_seg_len']
    # get test data iter
    test_datas = [TestLoader(x, cfg, batch_size=1, shuffle=shuffle, has_rpn=has_rpn) for x in roidbs]
    # load model
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    # create predictor
    key_predictors = [get_predictor(key_sym, key_sym_instance, cfg, arg_params, aux_params, test_datas[i], [ctx[i]]) for i in range(gpu_num)]
    cur_predictors = [get_predictor(cur_sym, cur_sym_instance, cfg, arg_params, aux_params, test_datas[i], [ctx[i]]) for i in range(gpu_num)]
    # start detection
    #pred_eval(0, key_predictors[0], cur_predictors[0], test_datas[0], imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)
    pred_eval_multiprocess(gpu_num, key_predictors, cur_predictors, test_datas, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)
Example 2: test_deeplab
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
                 ctx, prefix, epoch,
                 vis, logger=None, output_path=None):
    if not logger:
        assert False, 'require a logger'
    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))
    # load symbol and testing data
    sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
    segdb = imdb.gt_segdb()
    # get test data iter
    test_data = TestDataLoader(segdb, batch_size=len(ctx))
    # load model
    # arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
    arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
    aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
    # check parameters
    for k in sym.list_arguments():
        if k in data_shape_dict or k in ['softmax_label']:
            continue
        assert k in arg_params, k + ' not initialized'
        assert arg_params[k].shape == arg_shape_dict[k], \
            'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
    for k in sym.list_auxiliary_states():
        assert k in aux_params, k + ' not initialized'
        assert aux_params[k].shape == aux_shape_dict[k], \
            'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
Example 3: test_deeplab
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_deeplab():
    epoch = config.TEST.test_epoch
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    image_set = config.dataset.test_image_set
    root_path = config.dataset.root_path
    dataset = config.dataset.dataset
    dataset_path = config.dataset.dataset_path
    logger, final_output_path = create_logger(config.output_path, args.cfg, image_set)
    prefix = os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix)
    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))
    # load symbol and testing data
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=final_output_path)
    segdb = imdb.gt_segdb()
    # get test data iter
    test_data = TestDataLoaderVideo(segdb, config=config, batch_size=len(ctx))
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    print data_shape_dict
    sym_instance.infer_shape(data_shape_dict)
    # load model and check parameters
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES]))),]]
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, vis=args.vis, ignore_cache=args.ignore_cache, logger=logger)
Example 4: test_deeplab
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_deeplab():
    epoch = config.TEST.test_epoch
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    image_set = config.dataset.test_image_set
    root_path = config.dataset.root_path
    dataset = config.dataset.dataset
    dataset_path = config.dataset.dataset_path
    logger, final_output_path = create_logger(config.output_path, args.cfg, image_set)
    prefix = os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix)
    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))
    # load symbol and testing data
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=final_output_path)
    segdb = imdb.gt_segdb()
    # get test data iter
    test_data = TestDataLoader(segdb, config=config, batch_size=len(ctx))
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)
    # load model and check parameters
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, vis=args.vis, ignore_cache=args.ignore_cache, logger=logger)
Example 5: test_rpn
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_rpn(cfg, dataset, image_set, root_path, dataset_path,
             ctx, prefix, epoch,
             vis, shuffle, thresh, logger=None, output_path=None):
    # set up logger
    if not logger:
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)
    # rpn generate proposal cfg
    cfg.TEST.HAS_RPN = True
    # print cfg
    pprint.pprint(cfg)
    logger.info('testing rpn cfg:{}\n'.format(pprint.pformat(cfg)))
    # load symbol
    sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    sym = sym_instance.get_symbol_rpn(cfg, is_train=False)
    # load dataset and prepare imdb for training
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
    roidb = imdb.gt_roidb()
    test_data = TestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=True)
    # load model
    arg_params, aux_params = load_param(prefix, epoch)
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)
    # check parameters
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data[0]]
    label_names = None if test_data.provide_label[0] is None else [k[0] for k in test_data.provide_label[0]]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start testing
    imdb_boxes = generate_proposals(predictor, test_data, imdb, cfg, vis=vis, thresh=thresh)
    all_log_info = imdb.evaluate_recall(roidb, candidate_boxes=imdb_boxes)
    logger.info(all_log_info)
Example 6: test_rcnn
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path,
              ctx, prefix, epoch,
              vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None):
    if not logger:
        assert False, 'require a logger'
    # print cfg
    pprint.pprint(cfg)
    logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))
    # load symbol and testing data
    if has_rpn:
        sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
        sym = sym_instance.get_symbol(cfg, is_train=False)
        imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
        roidb = imdb.gt_roidb()
    else:
        sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
        sym = sym_instance.get_symbol_rcnn(cfg, is_train=False)
        imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
        gt_roidb = imdb.gt_roidb()
        roidb = eval('imdb.' + proposal + '_roidb')(gt_roidb)
    # get test data iter
    test_data = TestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)
    # load model
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = None
    max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
    if not has_rpn:
        max_data_shape.append(('rois', (cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)
Example 7: main
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    # load demo data
    image_names = ['000240.jpg', '000437.jpg', '004072.jpg', '007912.jpg']
    image_all = []
    data = []
    for im_name in image_names:
        assert os.path.exists(cur_path + '/../demo/deform_conv/' + im_name), \
            ('{} does not exist'.format('../demo/deform_conv/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_conv/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
        data.append({'data': im_tensor, 'im_info': im_info})
    # get predictor
    data_names = ['data', 'im_info']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_conv', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        res5a_offset = output[0]['res5a_branch2b_offset_output'].asnumpy()
        res5b_offset = output[0]['res5b_branch2b_offset_output'].asnumpy()
        res5c_offset = output[0]['res5c_branch2b_offset_output'].asnumpy()
        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_dconv_offset(im, [res5c_offset, res5b_offset, res5a_offset])
Example 8: main
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol_rfcn(config, is_train=False)
    # load demo data
    image_names = ['000057.jpg', '000149.jpg', '000351.jpg', '002535.jpg']
    image_all = []
    # ground truth boxes
    gt_boxes_all = [np.array([[132, 52, 384, 357]]), np.array([[113, 1, 350, 360]]),
                    np.array([[0, 27, 329, 155]]), np.array([[8, 40, 499, 289]])]
    gt_classes_all = [np.array([3]), np.array([16]), np.array([7]), np.array([12])]
    data = []
    for idx, im_name in enumerate(image_names):
        assert os.path.exists(cur_path + '/../demo/deform_psroi/' + im_name), \
            ('{} does not exist'.format('../demo/deform_psroi/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_psroi/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        gt_boxes = gt_boxes_all[idx]
        gt_boxes = np.round(gt_boxes * im_scale)
        data.append({'data': im_tensor, 'rois': np.hstack((np.zeros((gt_boxes.shape[0], 1)), gt_boxes))})
    # get predictor
    data_names = ['data', 'rois']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_psroi', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        cls_offset = output[0]['rfcn_cls_offset_output'].asnumpy()
        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        boxes = gt_boxes_all[idx]
        show_dpsroi_offset(im, boxes, cls_offset, gt_classes_all[idx])
Example 9: test_rcnn
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path, motion_iou_path,
              ctx, prefix, epoch,
              vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None, enable_detailed_eval=True):
    if not logger:
        assert False, 'require a logger'
    # print cfg
    pprint.pprint(cfg)
    logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))
    # load symbol and testing data
    feat_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    aggr_sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    feat_sym = feat_sym_instance.get_feat_symbol(cfg)
    aggr_sym = aggr_sym_instance.get_aggregation_symbol(cfg)
    imdb = eval(dataset)(image_set, root_path, dataset_path, motion_iou_path, result_path=output_path, enable_detailed_eval=enable_detailed_eval)
    roidb = imdb.gt_roidb()
    # get test data iter
    # split roidbs
    gpu_num = len(ctx)
    roidbs = [[] for x in range(gpu_num)]
    roidbs_seg_lens = np.zeros(gpu_num, dtype=np.int)
    for x in roidb:
        gpu_id = np.argmin(roidbs_seg_lens)
        roidbs[gpu_id].append(x)
        roidbs_seg_lens[gpu_id] += x['frame_seg_len']
    # get test data iter
    test_datas = [TestLoader(x, cfg, batch_size=1, shuffle=shuffle, has_rpn=has_rpn) for x in roidbs]
    # load model
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    # create predictor
    feat_predictors = [get_predictor(feat_sym, feat_sym_instance, cfg, arg_params, aux_params, test_datas[i], [ctx[i]]) for i in range(gpu_num)]
    aggr_predictors = [get_predictor(aggr_sym, aggr_sym_instance, cfg, arg_params, aux_params, test_datas[i], [ctx[i]]) for i in range(gpu_num)]
    # start detection
    pred_eval_multiprocess(gpu_num, feat_predictors, aggr_predictors, test_datas, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)
Example 10: test_rcnn
# Required import: from utils import load_model [as alias]
# Or: from utils.load_model import load_param [as alias]
def test_rcnn(cfg, dataset, image_set, root_path, dataset_path,
              ctx, prefix, epoch,
              vis, ignore_cache, shuffle, has_rpn, proposal, thresh, logger=None, output_path=None):
    if not logger:
        assert False, 'require a logger'
    # print cfg
    pprint.pprint(cfg)
    logger.info('testing cfg:{}\n'.format(pprint.pformat(cfg)))
    # load symbol and testing data
    sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    sym = sym_instance.get_test_symbol(cfg)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
    roidb = imdb.gt_roidb()
    # get test data iter
    test_data = TestLoader(roidb, cfg, batch_size=len(ctx), shuffle=shuffle, has_rpn=has_rpn)
    # load model
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = None
    max_data_shape = [[('data', (1, 3, max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]]
    if not has_rpn:
        max_data_shape.append(('rois', (cfg.TEST.PROPOSAL_POST_NMS_TOP_N + 30, 5)))
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, cfg, vis=vis, ignore_cache=ignore_cache, thresh=thresh, logger=logger)