This article collects typical usage examples of the Python attribute core.config.cfg.DATA_DIR. If you have been wondering what cfg.DATA_DIR does, how to use it, or what real code that uses it looks like, the selected examples below should help. You can also explore further usage examples of core.config.cfg, where this attribute is defined.
The following shows 5 code examples of the cfg.DATA_DIR attribute, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
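Before the examples, a quick orientation may help: in Detectron-style projects, core/config.py typically exposes cfg as a nested attribute-style configuration object, and DATA_DIR is simply the string path of the project's data directory that all dataset and model files are resolved against. The sketch below is a hypothetical stand-in, not the real core/config.py; the default path and the numeric values are illustrative only.

# Rough, hypothetical stand-in for core/config.py (not the real file):
# cfg is a nested attribute-style config and DATA_DIR is the dataset root
# that every example below joins paths against.
import os
from types import SimpleNamespace

cfg = SimpleNamespace(
    DATA_DIR=os.path.abspath('data'),  # assumed default location
    TRAIN=SimpleNamespace(GT_SCORES=False, JOINT_TRAINING=False),
    MODEL=SimpleNamespace(NUM_CLASSES=151, NUM_PRD_CLASSES=50, USE_OVLP_FILTER=True),
)

print(cfg.DATA_DIR)  # e.g. /path/to/project/data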
Example 1: parse_args
# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import DATA_DIR [as alias]
def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(formatter_class=ColorHelpFormatter)
    parser.add_argument('--output_dir', help='Directory to save downloaded weight files',
                        default=os.path.join(cfg.DATA_DIR, 'pretrained_model'))
    parser.add_argument('-t', '--targets', nargs='+', metavar='file_name',
                        help='Files to download. Allowed values are: ' +
                             ', '.join(map(lambda s: Fore.YELLOW + s + Fore.RESET,
                                           list(PRETRAINED_WEIGHTS.keys()))),
                        choices=list(PRETRAINED_WEIGHTS.keys()),
                        default=list(PRETRAINED_WEIGHTS.keys()))
    return parser.parse_args()

# ---------------------------------------------------------------------------- #
# Mapping from filename to google drive file_id
# ---------------------------------------------------------------------------- #
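The part relevant to this article is the --output_dir default, which is rooted at cfg.DATA_DIR. Below is a self-contained sketch of that pattern, with a module-level DATA_DIR standing in for the config value; the path is an assumption for illustration.

# Self-contained sketch of the same pattern: a CLI default rooted at the
# data directory. DATA_DIR here is a stand-in for cfg.DATA_DIR.
import argparse
import os

DATA_DIR = os.path.abspath('data')  # assumed stand-in for cfg.DATA_DIR

parser = argparse.ArgumentParser()
parser.add_argument('--output_dir',
                    help='Directory to save downloaded weight files',
                    default=os.path.join(DATA_DIR, 'pretrained_model'))
args = parser.parse_args([])  # parse an empty argv for demonstration
print(args.output_dir)        # e.g. /path/to/project/data/pretrained_model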
Example 2: cache_path
# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import DATA_DIR [as alias]
def cache_path(self):
    cache_path = os.path.abspath(os.path.join(cfg.DATA_DIR, 'cache'))
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    return cache_path
Example 3: cache_path
# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import DATA_DIR [as alias]
def cache_path(self):
    cache_path = os.path.abspath(os.path.join(cfg.DATA_DIR, 'cache'))
    if cfg.TRAIN.GT_SCORES:
        cache_path += '_gt-scores'
    if cfg.TRAIN.JOINT_TRAINING:
        cache_path += '_joint'
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    return cache_path
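Examples 2 and 3 follow the same idiom: an accessor (usually decorated with @property in the original dataset classes) that lazily creates a cache directory under cfg.DATA_DIR, with the second variant appending config-dependent suffixes so caches built with cfg.TRAIN.GT_SCORES or cfg.TRAIN.JOINT_TRAINING enabled never collide with the plain cache. Here is a self-contained sketch of that idiom, with module-level stand-ins for the config values.

# Stand-alone sketch of the cache-directory idiom above; DATA_DIR, GT_SCORES
# and JOINT_TRAINING are stand-ins for the corresponding cfg entries.
import os

DATA_DIR = os.path.abspath('data')  # assumed data root
GT_SCORES = False
JOINT_TRAINING = True

def cache_path():
    path = os.path.abspath(os.path.join(DATA_DIR, 'cache'))
    if GT_SCORES:
        path += '_gt-scores'
    if JOINT_TRAINING:
        path += '_joint'
    os.makedirs(path, exist_ok=True)  # idempotent version of the exists() check
    return path

print(cache_path())  # e.g. /path/to/project/data/cache_joint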
Example 4: get_rel_counts
# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import DATA_DIR [as alias]
def get_rel_counts(ds_name, must_overlap=True):
    """
    Get counts of all of the relations. Used for modeling directly P(rel | o1, o2).
    :param ds_name: dataset name ('vg' or 'vrd')
    :param must_overlap: if True, only count overlapping box pairs as background
    :return: fg_matrix, bg_matrix
    """
    if ds_name.find('vg') >= 0:
        with open(cfg.DATA_DIR + '/vg/rel_annotations_train.json') as f:
            train_data = json.load(f)
    elif ds_name.find('vrd') >= 0:
        with open(cfg.DATA_DIR + '/vrd/new_annotations_train.json') as f:
            train_data = json.load(f)
    else:
        raise NotImplementedError

    fg_matrix = np.zeros((
        cfg.MODEL.NUM_CLASSES - 1,  # does not include background
        cfg.MODEL.NUM_CLASSES - 1,  # does not include background
        cfg.MODEL.NUM_PRD_CLASSES + 1,  # includes background
    ), dtype=np.int64)

    bg_matrix = np.zeros((
        cfg.MODEL.NUM_CLASSES - 1,  # does not include background
        cfg.MODEL.NUM_CLASSES - 1,  # does not include background
    ), dtype=np.int64)

    for _, im_rels in train_data.items():
        # get all object boxes
        gt_box_to_label = {}
        for i, rel in enumerate(im_rels):
            sbj_box = box_utils.y1y2x1x2_to_x1y1x2y2(rel['subject']['bbox'])
            obj_box = box_utils.y1y2x1x2_to_x1y1x2y2(rel['object']['bbox'])
            sbj_lbl = rel['subject']['category']  # does not include background
            obj_lbl = rel['object']['category']  # does not include background
            prd_lbl = rel['predicate']  # does not include background
            if tuple(sbj_box) not in gt_box_to_label:
                gt_box_to_label[tuple(sbj_box)] = sbj_lbl
            if tuple(obj_box) not in gt_box_to_label:
                gt_box_to_label[tuple(obj_box)] = obj_lbl
            fg_matrix[sbj_lbl, obj_lbl, prd_lbl + 1] += 1

        if cfg.MODEL.USE_OVLP_FILTER:
            if len(gt_box_to_label):
                gt_boxes = np.array(list(gt_box_to_label.keys()), dtype=np.int32)
                gt_classes = np.array(list(gt_box_to_label.values()), dtype=np.int32)
                o1o2_total = gt_classes[np.array(
                    box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
                for (o1, o2) in o1o2_total:
                    bg_matrix[o1, o2] += 1
        else:
            # consider all pairs of boxes, overlapped or non-overlapped
            for b1, l1 in gt_box_to_label.items():
                for b2, l2 in gt_box_to_label.items():
                    if b1 == b2:
                        continue
                    bg_matrix[l1, l2] += 1

    return fg_matrix, bg_matrix
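The two count matrices are typically turned into a frequency baseline for P(prd | sbj, obj). Since fg_matrix reserves predicate slot 0 for the background relation (it increments prd_lbl + 1) and bg_matrix counts object pairs with no annotated relation, one common recipe is to place the background counts in slot 0, smooth, and normalize. The function below is a hedged sketch of that recipe, not the repository's exact post-processing; the smoothing constant is an assumption.

# Hedged sketch: convert fg_matrix/bg_matrix into P(prd | sbj, obj).
import numpy as np

def rel_frequency_prior(fg_matrix, bg_matrix, eps=1e-3):
    counts = fg_matrix.astype(np.float64) + eps
    counts[:, :, 0] = bg_matrix.astype(np.float64) + eps  # background relation
    return counts / counts.sum(axis=2, keepdims=True)     # normalize per object pair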
Example 5: get_obj_prd_vecs
# Required import: from core.config import cfg [as alias]
# Or: from core.config.cfg import DATA_DIR [as alias]
def get_obj_prd_vecs(dataset_name):
    word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(
        cfg.DATA_DIR + '/word2vec_model/GoogleNews-vectors-negative300.bin', binary=True)
    logger.info('Model loaded.')
    # change everything into lowercase
    all_keys = list(word2vec_model.vocab.keys())
    for key in all_keys:
        new_key = key.lower()
        word2vec_model.vocab[new_key] = word2vec_model.vocab.pop(key)
    logger.info('Wiki words converted to lowercase.')

    if dataset_name.find('vrd') >= 0:
        with open(cfg.DATA_DIR + '/vrd/objects.json') as f:
            obj_cats = json.load(f)
        with open(cfg.DATA_DIR + '/vrd/predicates.json') as f:
            prd_cats = json.load(f)
    elif dataset_name.find('vg') >= 0:
        with open(cfg.DATA_DIR + '/vg/objects.json') as f:
            obj_cats = json.load(f)
        with open(cfg.DATA_DIR + '/vg/predicates.json') as f:
            prd_cats = json.load(f)
    else:
        raise NotImplementedError

    # represent background with the word 'unknown'
    # obj_cats.insert(0, 'unknown')
    prd_cats.insert(0, 'unknown')

    all_obj_vecs = np.zeros((len(obj_cats), 300), dtype=np.float32)
    for r, obj_cat in enumerate(obj_cats):
        obj_words = obj_cat.split()
        for word in obj_words:
            raw_vec = word2vec_model[word]
            all_obj_vecs[r] += (raw_vec / la.norm(raw_vec))
        all_obj_vecs[r] /= len(obj_words)
    logger.info('Object label vectors loaded.')

    all_prd_vecs = np.zeros((len(prd_cats), 300), dtype=np.float32)
    for r, prd_cat in enumerate(prd_cats):
        prd_words = prd_cat.split()
        for word in prd_words:
            raw_vec = word2vec_model[word]
            all_prd_vecs[r] += (raw_vec / la.norm(raw_vec))
        all_prd_vecs[r] /= len(prd_words)
    logger.info('Predicate label vectors loaded.')

    return all_obj_vecs, all_prd_vecs
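A brief note on consuming the result: the two returned matrices hold averaged, L2-normalized word2vec embeddings, one 300-dimensional row per object or predicate category, so downstream code can compare labels with plain array arithmetic. The helper below is illustrative only and not part of the repository.

# Illustrative helper (hypothetical): cosine similarity between one object
# category vector and every predicate category vector.
import numpy as np

def obj_to_prd_similarity(all_obj_vecs, all_prd_vecs, obj_idx):
    v = all_obj_vecs[obj_idx]
    v = v / (np.linalg.norm(v) + 1e-12)
    prd = all_prd_vecs / (np.linalg.norm(all_prd_vecs, axis=1, keepdims=True) + 1e-12)
    return prd @ v  # shape: (num_predicates,)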