This article collects typical usage examples of config.config.config.symbol in Python. If you are wondering how config.symbol is used, what it is for, or where to find working examples of it, the curated code samples below may help. You can also explore further usage examples from the enclosing config.config.config object.
Six code examples of config.symbol are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
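The examples share two patterns: during training, config.symbol is treated as a file name so that the matching network definition under symbols/ can be copied next to the training output (Examples 1-3); at test or demo time it is treated as both a module name and a class name and instantiated via eval (Examples 4-6). Below is a minimal, hypothetical sketch of that lookup written with importlib instead of eval; it assumes config.symbol names a module in the symbols package that defines a class of the same name exposing get_symbol(config, is_train), which is how the snippets below use it.

from config.config import config  # same import the examples below rely on
import importlib

# Hypothetical sketch: resolve config.symbol (e.g. 'resnet_v1_101_rcnn') to its class.
# The eval() calls in Examples 4-6 assume the symbols package was star-imported
# beforehand; importlib makes the same lookup explicit.
symbol_module = importlib.import_module('symbols.' + config.symbol)
sym_instance = getattr(symbol_module, config.symbol)()
sym = sym_instance.get_symbol(config, is_train=False)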
Example 1: main
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def main():
    print('Called with argument:', args)
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
    # back up the network definition named by config.symbol next to the training output
    shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), output_path)
    prefix = os.path.join(output_path, 'rcnn')
    logging.info('########## TRAIN rcnn WITH IMAGENET INIT AND RPN DETECTION')
    train_rcnn(config, config.dataset.dataset, config.dataset.image_set, config.dataset.root_path, config.dataset.dataset_path,
               args.frequent, config.default.kvstore, config.TRAIN.FLIP, config.TRAIN.SHUFFLE, config.TRAIN.RESUME,
               ctx, config.network.pretrained, config.network.pretrained_epoch, prefix, config.TRAIN.begin_epoch,
               config.TRAIN.end_epoch, train_shared=False, lr=config.TRAIN.lr, lr_step=config.TRAIN.lr_step,
               proposal=config.dataset.proposal, logger=logger)
Example 2: main
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def main():
    print('Called with argument:', args)
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
    # back up the network definition named by config.symbol next to the training output
    shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), output_path)
    prefix = os.path.join(output_path, 'rfcn')
    logging.info('########## TRAIN rfcn WITH IMAGENET INIT AND RPN DETECTION')
    train_rcnn(config, config.dataset.dataset, config.dataset.image_set, config.dataset.root_path, config.dataset.dataset_path,
               args.frequent, config.default.kvstore, config.TRAIN.FLIP, config.TRAIN.SHUFFLE, config.TRAIN.RESUME,
               ctx, config.network.pretrained, config.network.pretrained_epoch, prefix, config.TRAIN.begin_epoch,
               config.TRAIN.end_epoch, train_shared=False, lr=config.TRAIN.lr, lr_step=config.TRAIN.lr_step,
               proposal=config.dataset.proposal, logger=logger)
Example 3: main
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def main():
    print('Called with argument:', args)
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    logger, output_path = create_logger(config.output_path, args.cfg, config.dataset.image_set)
    # back up the network definition named by config.symbol next to the training output
    shutil.copy2(os.path.join(curr_path, 'symbols', config.symbol + '.py'), output_path)
    # this script trains on precomputed proposals, so end-to-end training must be disabled
    assert config.TRAIN.END2END == False
    prefix = os.path.join(output_path, config.TRAIN.model_prefix)
    logging.info('########## TRAIN rcnn WITH IMAGENET INIT AND RPN DETECTION')
    train_rcnn(config, config.dataset.dataset, config.dataset.image_set, config.dataset.root_path, config.dataset.dataset_path,
               args.frequent, config.default.kvstore, config.TRAIN.FLIP, config.TRAIN.SHUFFLE, config.TRAIN.RESUME,
               ctx, config.network.pretrained, config.network.pretrained_epoch, prefix, config.TRAIN.begin_epoch,
               config.TRAIN.end_epoch, train_shared=False, lr=config.TRAIN.lr, lr_step=config.TRAIN.lr_step,
               proposal=config.dataset.proposal, logger=logger, output_path=output_path)
Example 4: test_deeplab
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def test_deeplab():
    epoch = config.TEST.test_epoch
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    image_set = config.dataset.test_image_set
    root_path = config.dataset.root_path
    dataset = config.dataset.dataset
    dataset_path = config.dataset.dataset_path
    logger, final_output_path = create_logger(config.output_path, args.cfg, image_set)
    prefix = os.path.join(final_output_path, '..', '_'.join([iset for iset in config.dataset.image_set.split('+')]), config.TRAIN.model_prefix)
    # print config
    pprint.pprint(config)
    logger.info('testing config:{}\n'.format(pprint.pformat(config)))
    # load symbol and testing data
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=final_output_path)
    segdb = imdb.gt_segdb()
    # get test data iter
    test_data = TestDataLoader(segdb, config=config, batch_size=len(ctx))
    # infer shape
    data_shape_dict = dict(test_data.provide_data_single)
    sym_instance.infer_shape(data_shape_dict)
    # load model and check parameters
    arg_params, aux_params = load_param(prefix, epoch, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)
    # decide maximum shape
    data_names = [k[0] for k in test_data.provide_data_single]
    label_names = ['softmax_label']
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    # create predictor
    predictor = Predictor(sym, data_names, label_names,
                          context=ctx, max_data_shapes=max_data_shape,
                          provide_data=test_data.provide_data, provide_label=test_data.provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # start detection
    pred_eval(predictor, test_data, imdb, vis=args.vis, ignore_cache=args.ignore_cache, logger=logger)
Example 5: main
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol(config, is_train=False)
    # load demo data
    image_names = ['000240.jpg', '000437.jpg', '004072.jpg', '007912.jpg']
    image_all = []
    data = []
    for im_name in image_names:
        assert os.path.exists(cur_path + '/../demo/deform_conv/' + im_name), \
            '{} does not exist'.format('../demo/deform_conv/' + im_name)
        im = cv2.imread(cur_path + '/../demo/deform_conv/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        im_info = np.array([[im_tensor.shape[2], im_tensor.shape[3], im_scale]], dtype=np.float32)
        data.append({'data': im_tensor, 'im_info': im_info})
    # get predictor
    data_names = ['data', 'im_info']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_conv', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        res5a_offset = output[0]['res5a_branch2b_offset_output'].asnumpy()
        res5b_offset = output[0]['res5b_branch2b_offset_output'].asnumpy()
        res5c_offset = output[0]['res5c_branch2b_offset_output'].asnumpy()
        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        show_dconv_offset(im, [res5c_offset, res5b_offset, res5a_offset])
Example 6: main
# Required import: from config.config import config [as alias]
# Or: from config.config.config import symbol [as alias]
def main():
    # get symbol
    pprint.pprint(config)
    sym_instance = eval(config.symbol + '.' + config.symbol)()
    sym = sym_instance.get_symbol_rfcn(config, is_train=False)
    # load demo data
    image_names = ['000057.jpg', '000149.jpg', '000351.jpg', '002535.jpg']
    image_all = []
    # ground truth boxes
    gt_boxes_all = [np.array([[132, 52, 384, 357]]), np.array([[113, 1, 350, 360]]),
                    np.array([[0, 27, 329, 155]]), np.array([[8, 40, 499, 289]])]
    gt_classes_all = [np.array([3]), np.array([16]), np.array([7]), np.array([12])]
    data = []
    for idx, im_name in enumerate(image_names):
        assert os.path.exists(cur_path + '/../demo/deform_psroi/' + im_name), \
            '{} does not exist'.format('../demo/deform_psroi/' + im_name)
        im = cv2.imread(cur_path + '/../demo/deform_psroi/' + im_name, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        image_all.append(im)
        target_size = config.SCALES[0][0]
        max_size = config.SCALES[0][1]
        im, im_scale = resize(im, target_size, max_size, stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        gt_boxes = gt_boxes_all[idx]
        gt_boxes = np.round(gt_boxes * im_scale)
        data.append({'data': im_tensor, 'rois': np.hstack((np.zeros((gt_boxes.shape[0], 1)), gt_boxes))})
    # get predictor
    data_names = ['data', 'rois']
    label_names = []
    data = [[mx.nd.array(data[i][name]) for name in data_names] for i in xrange(len(data))]
    max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
    provide_data = [[(k, v.shape) for k, v in zip(data_names, data[i])] for i in xrange(len(data))]
    provide_label = [None for i in xrange(len(data))]
    arg_params, aux_params = load_param(cur_path + '/../model/deform_psroi', 0, process=True)
    predictor = Predictor(sym, data_names, label_names,
                          context=[mx.gpu(0)], max_data_shapes=max_data_shape,
                          provide_data=provide_data, provide_label=provide_label,
                          arg_params=arg_params, aux_params=aux_params)
    # test
    for idx, _ in enumerate(image_names):
        data_batch = mx.io.DataBatch(data=[data[idx]], label=[], pad=0, index=idx,
                                     provide_data=[[(k, v.shape) for k, v in zip(data_names, data[idx])]],
                                     provide_label=[None])
        output = predictor.predict(data_batch)
        cls_offset = output[0]['rfcn_cls_offset_output'].asnumpy()
        im = image_all[idx]
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        boxes = gt_boxes_all[idx]
        show_dpsroi_offset(im, boxes, cls_offset, gt_classes_all[idx])