This article collects typical usage examples of the model.config.cfg.MATLAB attribute in Python. If you are wondering what cfg.MATLAB is for, how to use it, or what real code that reads it looks like, the curated examples below should help. You can also explore further usage examples from the module the attribute belongs to, model.config.cfg.
Below are 10 code examples of the cfg.MATLAB attribute, sorted by popularity by default.
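As a quick orientation before the examples: cfg.MATLAB is simply a string naming the MATLAB executable that the evaluation code shells out to. A minimal sketch of reading and overriding it, assuming the tf-faster-rcnn style model.config module where the default value is 'matlab':

from model.config import cfg

print(cfg.MATLAB)                                   # typically 'matlab'
cfg.MATLAB = '/usr/local/MATLAB/R2019b/bin/matlab'  # point at a specific install

If MATLAB is not on your PATH, setting cfg.MATLAB to an absolute path like this is the usual fix before calling any of the _do_matlab_eval methods below.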
Example 1: _do_matlab_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 16 | Source: pascal_voc.py
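For reference, the command string assembled above looks roughly like the following. This is an illustration only, assuming cfg.MATLAB == 'matlab' and made-up paths and comp id:

# Illustrative value of cmd, not output from the original code:
cmd = ("cd /path/to/repo/lib/datasets/VOCdevkit-matlab-wrapper && "
       "matlab -nodisplay -nodesktop -r \"dbstop if error; "
       "voc_eval('/path/to/VOCdevkit','comp4','test','output'); quit;\"")

subprocess.call(cmd, shell=True) then runs MATLAB in batch mode; the "dbstop if error" prefix drops into the MATLAB debugger instead of exiting silently if the evaluation script fails.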
Example 2: _do_matlab_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._dist_path,
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
Example 3: _do_matlab_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_matlab_eval(self, output_dir='output', suffix=''):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'KAISTdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'kaist_eval_full(\'{:s}\',\'{:s}\'); quit;"' \
.format(os.path.join(output_dir, 'det'+suffix), self._data_path)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
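A hypothetical call site, to show how the suffix argument selects which detection dump under output_dir gets evaluated (the imdb variable and suffix value are illustrative, not from the original code):

# Hypothetical usage: evaluates output/det-test via kaist_eval_full.m
imdb._do_matlab_eval(output_dir='output', suffix='-test')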
Example 4: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 46 | Source: pascal_voc.py
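The use_07_metric flag matters because voc_eval switches between two AP definitions. For reference, here is a sketch of the voc_ap helper these repositories inherit from py-faster-rcnn (reproduced from memory, so treat it as a reference sketch rather than the exact vendored copy):

import numpy as np

def voc_ap(rec, prec, use_07_metric=False):
    """Compute AP from recall/precision arrays.

    use_07_metric=True selects the 11-point interpolation used before
    VOC2010; otherwise the area under the interpolated PR curve is used.
    """
    if use_07_metric:
        # 11-point metric: average the max precision at recall 0.0, 0.1, ..., 1.0
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # Integrate precision over recall after making the precision
        # envelope monotonically decreasing.
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i])
    return ap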
Example 5: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.jpg'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
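This variant also renders a PR curve per class with matplotlib. One practical note, offered as general matplotlib advice rather than something from the original repository: on a headless server you need a non-interactive backend before importing pyplot, and closing each figure avoids accumulating open figures across classes:

# Select a non-interactive backend so fig.savefig works without a display.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# ...and after fig.savefig('{}.jpg'.format(cls)):
plt.close(fig)  # free the figure instead of keeping every class's plot open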
Example 6: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.png'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
Example 7: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
'/home-3/pengzhou@umd.edu/work/pengzhou/dataset',
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
'/home-3/pengzhou@umd.edu/work/pengzhou/dataset',
self._image_set + '.txt')
cachedir = os.path.join('/home-3/pengzhou@umd.edu/work/pengzhou/dataset', 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.jpg'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
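Unlike the previous variants, this one hard-codes an absolute dataset root ('/home-3/pengzhou@umd.edu/work/pengzhou/dataset'), so it only runs on the original author's machine. A sketch of making that root configurable; DATA_ROOT is an illustrative name, not part of the original code:

import os

# Fall back to a placeholder path when the environment variable is unset.
DATA_ROOT = os.environ.get('DATA_ROOT', '/path/to/dataset')
annopath = os.path.join(DATA_ROOT, 'coco_multi', 'Annotations', '{:s}.xml')
cachedir = os.path.join(DATA_ROOT, 'annotations_cache')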
Example 8: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
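Every variant above dumps per-class precision/recall to <cls>_pr.pkl in output_dir. A sketch of loading one back, for example to re-plot a PR curve later (the 'car' file name is illustrative):

import pickle

# Reads the dict written by pickle.dump({'rec': ..., 'prec': ..., 'ap': ...})
with open('output/car_pr.pkl', 'rb') as f:
    pr = pickle.load(f)
print('AP = {:.4f}, {} PR points'.format(pr['ap'], len(pr['rec'])))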
Example 9: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
#print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.png'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
Example 10: _do_python_eval
# Module to import: from model.config import cfg [as alias]
# Or: from model.config.cfg import MATLAB [as alias]
def _do_python_eval(self, output_dir=None):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
if output_dir is not None:
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
'''
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
'''
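Unlike the earlier variants, this last one treats output_dir as optional: passing None skips both the directory creation and the pickling, which is handy for quick validation runs where only the printed AP matters. A hypothetical call:

# Hypothetical: prints per-class AP and mean AP, writes nothing to disk.
imdb._do_python_eval(output_dir=None)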