本文整理汇总了Python中config.config.config.SCALES属性的典型用法代码示例。如果您正苦于以下问题:Python config.SCALES属性的具体用法?Python config.SCALES怎么用?Python config.SCALES使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在类config.config.config
的用法示例。
在下文中一共展示了config.SCALES属性的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_deeplab
# 需要导入模块: from config.config import config [as 别名]
# 或者: from config.config.config import SCALES [as 别名]
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
                 ctx, prefix, epoch,
                 vis, logger=None, output_path=None):
    """Evaluate a DeepLab segmentation checkpoint on a test set.

    Builds the test symbol, loads the trained parameters, validates that every
    parameter's shape matches the shapes inferred from the test data, then runs
    `pred_eval` over the whole dataset.

    Args:
        network: network name suffix; resolved to a `get_<network>_test` builder.
        dataset: dataset class name (resolved via eval) providing `gt_segdb()`.
        image_set, root_path, dataset_path: dataset location arguments.
        ctx: list of MXNet contexts; batch size equals len(ctx).
        prefix, epoch: checkpoint prefix and epoch to load.
        vis: whether to visualize predictions during evaluation.
        logger: required logging.Logger; raises if missing.
        output_path: optional result directory passed to the imdb.

    Raises:
        AssertionError: if no logger is supplied, or if a loaded parameter is
            missing or has a shape inconsistent with the inferred shape.
    """
    if not logger:
        # `assert False` would be stripped under `python -O`; raise explicitly
        # (same exception type) so the precondition always holds.
        raise AssertionError('require a logger')

    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))

    # load symbol and testing data
    # NOTE(review): eval on a caller-supplied name — acceptable for a research
    # script, but never expose these arguments to untrusted input.
    sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
    segdb = imdb.gt_segdb()

    # get test data iter
    test_data = TestDataLoader(segdb, batch_size=len(ctx))

    # load model
    arg_params, aux_params = load_param(prefix, epoch, process=True)

    # infer shapes from a single-device data batch
    data_shape_dict = dict(test_data.provide_data_single)
    arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
    arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
    aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))

    # check parameters: every non-input argument must be present with the right shape
    for k in sym.list_arguments():
        if k in data_shape_dict or k in ['softmax_label']:
            continue
        assert k in arg_params, k + ' not initialized'
        assert arg_params[k].shape == arg_shape_dict[k], \
            'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
    for k in sym.list_auxiliary_states():
        assert k in aux_params, k + ' not initialized'
        assert aux_params[k].shape == aux_shape_dict[k], \
            'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)

    # decide maximum shape: largest (height, width) over all configured test scales
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]

    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # start evaluation
    pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
示例2: test_deeplab
# 需要导入模块: from config.config import config [as 别名]
# 或者: from config.config.config import SCALES [as 别名]
def test_deeplab():
    """Evaluate the DeepLab checkpoint described entirely by the global config.

    Reads every setting (epoch, GPUs, dataset, model prefix) from the module-level
    `config` and `args` objects, builds the test symbol through the configured
    symbol class, verifies parameter shapes, and runs `pred_eval` over the test set.

    Side effects: creates a logger/output directory via `create_logger` and writes
    evaluation results under it.
    """
    epoch = config.TEST.test_epoch
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    image_set = config.dataset.test_image_set
    root_path = config.dataset.root_path
    dataset = config.dataset.dataset
    dataset_path = config.dataset.dataset_path

    logger, final_output_path = create_logger(config.output_path, args.cfg, image_set)
    # checkpoint lives next to the training output dir, named after the training image sets
    prefix = os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix)

    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))

    # load symbol and testing data
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=final_output_path)
    segdb = imdb.gt_segdb()

    # get test data iter
    test_data = TestDataLoader(segdb, config=config, batch_size=len(ctx))

    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)

    # load model and check parameters
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

    # decide maximum shape: largest (height, width) over all configured test scales
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]

    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # start evaluation
    pred_eval(predictor, test_data, imdb, vis=args.vis, ignore_cache=args.ignore_cache, logger=logger)
示例3: main
# 需要导入模块: from config.config import config [as 别名]
# 或者: from config.config.config import SCALES [as 别名]
def main():
    """Visualize deformable-convolution offsets on four demo images.

    Loads the pretrained `deform_conv` model, runs each demo image through the
    network, extracts the res5a/b/c branch2b offset maps, and displays them with
    `show_dconv_offset`.
    """
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)

    # load demo data
    image_names = ['000240.jpg', '000437.jpg', '004072.jpg', '007912.jpg']
    image_all = []
    data = []
    for im_name in image_names:
        # BUG FIX: the original used '%s does not exist'.format(...), which
        # leaves the literal '%s' and drops the path; use a {} placeholder.
        assert os.path.exists(cur_path + '/../demo/deform_conv/' + im_name), \
            ('{} does not exist'.format('../demo/deform_conv/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_conv/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        # SCALES entries are (target_size, max_size) pairs
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
        data.append({'data': im_tensor, 'im_info': im_info})

    # get predictor
    data_names = ['data', 'im_info']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_conv', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test: pull out the learned offset maps for each image and visualize them
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        res5a_offset = output[0]['res5a_branch2b_offset_output'].asnumpy()
        res5b_offset = output[0]['res5b_branch2b_offset_output'].asnumpy()
        res5c_offset = output[0]['res5c_branch2b_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; display expects RGB
        show_dconv_offset(im, [res5c_offset, res5b_offset, res5a_offset])
示例4: main
# 需要导入模块: from config.config import config [as 别名]
# 或者: from config.config.config import SCALES [as 别名]
def main():
    """Visualize deformable PS-RoI pooling offsets on four demo images.

    Loads the pretrained `deform_psroi` R-FCN model, feeds each demo image with
    its hard-coded ground-truth box as the RoI, extracts the `rfcn_cls_offset`
    output, and displays the offsets with `show_dpsroi_offset`.
    """
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol_rfcn(config, is_train=False)

    # load demo data
    image_names = ['000057.jpg', '000149.jpg', '000351.jpg', '002535.jpg']
    image_all = []
    # ground truth boxes (x1, y1, x2, y2) and their class ids, one box per image
    gt_boxes_all = [np.array([[132, 52, 384, 357]]), np.array([[113, 1, 350, 360]]),
                    np.array([[0, 27, 329, 155]]), np.array([[8, 40, 499, 289]])]
    gt_classes_all = [np.array([3]), np.array([16]), np.array([7]), np.array([12])]
    data = []
    for idx, im_name in enumerate(image_names):
        # BUG FIX: the original used '%s does not exist'.format(...), which
        # leaves the literal '%s' and drops the path; use a {} placeholder.
        assert os.path.exists(cur_path + '/../demo/deform_psroi/' + im_name), \
            ('{} does not exist'.format('../demo/deform_psroi/' + im_name))
        im = cv2.imread(cur_path + '/../demo/deform_psroi/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        # SCALES entries are (target_size, max_size) pairs
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        gt_boxes = gt_boxes_all[idx]
        gt_boxes = np.round(gt_boxes * im_scale)  # scale boxes to the resized image
        # rois format: (batch_index, x1, y1, x2, y2); batch index is always 0 here
        data.append({'data': im_tensor, 'rois': np.hstack((np.zeros((gt_boxes.shape[0], 1)), gt_boxes))})

    # get predictor
    data_names = ['data', 'rois']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_psroi', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    # test: extract the per-class PS-RoI offsets and visualize them
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        cls_offset = output[0]['rfcn_cls_offset_output'].asnumpy()

        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; display expects RGB
        boxes = gt_boxes_all[idx]
        show_dpsroi_offset(im, boxes, cls_offset, gt_classes_all[idx])