本文整理匯總了Python中config.cfg.DATA_DIR屬性的典型用法代碼示例。如果您正苦於以下問題:Python cfg.DATA_DIR屬性的具體用法?Python cfg.DATA_DIR怎麼用?Python cfg.DATA_DIR使用的例子?那麼,這裡精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在類 config.cfg
的用法示例。
在下文中一共展示了cfg.DATA_DIR屬性的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。
示例1: Get_Next_Instance_HO_Neg_HICO
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def Get_Next_Instance_HO_Neg_HICO(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Build the training blob for one HICO-DET image: the preprocessed image plus
    augmented positive and sampled negative human/object boxes and their labels."""
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    # HICO-DET train images are named HICO_train2015_XXXXXXXX.jpg (8-digit zero-padded id).
    im_file = cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (str(image_id)).zfill(8) + '.jpg'
    raw = cv2.imread(im_file)

    # Mean-subtract and prepend a batch dimension of 1.
    image = raw.astype(np.float32, copy=True)
    image -= cfg.PIXEL_MEANS
    shape = image.shape
    image = image.reshape(1, shape[0], shape[1], 3)

    Pattern, Human_augmented, Object_augmented, action_HO, num_pos = Augmented_HO_Neg_HICO(
        GT, Trainval_Neg, shape, Pos_augment, Neg_select)

    return {
        'image': image,
        'H_boxes': Human_augmented,
        'O_boxes': Object_augmented,
        'gt_class_HO': action_HO,
        'sp': Pattern,
        'H_num': num_pos,
    }
示例2: parse_arg
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def parse_arg():
    """Parse command-line arguments for CapsNet training.

    Returns:
        argparse.Namespace with fields data_dir, ckpt, max_iters, batch_size.
    """
    parser = argparse.ArgumentParser(description="Train CapsNet")
    parser.add_argument('--data_dir', dest='data_dir',
                        type=str, default=cfg.DATA_DIR,
                        help='Directory for storing input data')
    parser.add_argument('--ckpt', dest='ckpt',
                        type=str, default=None,
                        help='path to the directory of check point')
    parser.add_argument('--max_iters', dest='max_iters', type=int,
                        default=10000, help='max of training iterations')
    parser.add_argument('--batch_size', dest='batch_size', type=int,
                        default=100, help='training batch size')
    # Removed the commented-out "print help and exit when called without args"
    # stub; it was dead code and all options have usable defaults anyway.
    return parser.parse_args()
示例3: process_glove
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def process_glove(vocab_list, save_path, size=4e5, random_init=True):
    """Trim the full GloVe file to the vectors for `vocab_list` and save as .npz.

    :param vocab_list: vocabulary words; row i of the saved matrix corresponds
        to vocab_list[i]
    :param save_path: output path (".npz" is appended by np.savez_compressed)
    :param size: expected number of lines in the GloVe file (progress-bar total)
    :param random_init: init rows for missing words randomly instead of zeros
    """
    if not gfile.Exists(save_path + ".npz"):
        glove_path = os.path.join(cfg.DATA_DIR, "glove.6B.{}d.txt".format(cfg.GLOVE_DIM))
        if random_init:
            glove = np.random.randn(len(vocab_list), cfg.GLOVE_DIM)
        else:
            glove = np.zeros((len(vocab_list), cfg.GLOVE_DIM))
        # Map each word to its FIRST position in vocab_list (same semantics as
        # list.index) so per-line lookups are O(1).  The original used
        # `word in vocab_list` + `vocab_list.index(word)` inside the loop over
        # ~400k GloVe lines, which is quadratic in the vocabulary size.
        vocab_index = {}
        for idx, word in enumerate(vocab_list):
            vocab_index.setdefault(word, idx)
        found = 0
        with open(glove_path, 'r') as fh:
            for line in tqdm(fh, total=size):
                array = line.lstrip().rstrip().split(" ")
                word = array[0]
                vector = list(map(float, array[1:]))
                # Match the word as-is, Capitalized, and UPPER-CASED; each hit
                # writes the vector and counts once, exactly like the original.
                for candidate in (word, word.capitalize(), word.upper()):
                    if candidate in vocab_index:
                        glove[vocab_index[candidate], :] = vector
                        found += 1
        print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab_list), glove_path))
        np.savez_compressed(save_path, glove=glove)
        print("saved trimmed glove matrix at: {}".format(save_path))
示例4: Get_Next_Instance_HO_Neg
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def Get_Next_Instance_HO_Neg(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Build the training blob for one V-COCO image: the preprocessed image plus
    augmented human/object boxes, labels and loss masks."""
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    # COCO train2014 images are named COCO_train2014_XXXXXXXXXXXX.jpg (12-digit zero-padded id).
    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
    raw = cv2.imread(im_file)

    # Mean-subtract and prepend a batch dimension of 1.
    image = raw.astype(np.float32, copy=True)
    image -= cfg.PIXEL_MEANS
    shape = image.shape
    image = image.reshape(1, shape[0], shape[1], 3)

    (Pattern, Human_augmented, Human_augmented_solo, Object_augmented,
     action_HO, action_H, mask_HO, mask_H) = Augmented_HO_Neg(
        GT, Trainval_Neg, shape, Pos_augment, Neg_select)

    return {
        'image': image,
        'H_boxes_solo': Human_augmented_solo,
        'H_boxes': Human_augmented,
        'O_boxes': Object_augmented,
        'gt_class_HO': action_HO,
        'gt_class_H': action_H,
        'Mask_HO': mask_HO,
        'Mask_H': mask_H,
        'sp': Pattern,
        'H_num': len(action_H),
    }
示例5: Get_Next_Instance_HO_spNeg
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def Get_Next_Instance_HO_spNeg(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
    """Build the training blob for one V-COCO image with spatial-pattern negatives:
    preprocessed image, augmented boxes, per-branch labels and loss masks."""
    GT = trainval_GT[iter % Data_length]
    image_id = GT[0]
    # COCO train2014 images are named COCO_train2014_XXXXXXXXXXXX.jpg (12-digit zero-padded id).
    im_file = cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
    raw = cv2.imread(im_file)

    # Mean-subtract and prepend a batch dimension of 1.
    image = raw.astype(np.float32, copy=True)
    image -= cfg.PIXEL_MEANS
    shape = image.shape
    image = image.reshape(1, shape[0], shape[1], 3)

    (Pattern, Human_augmented_sp, Human_augmented, Object_augmented,
     action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H) = Augmented_HO_spNeg(
        GT, Trainval_Neg, shape, Pos_augment, Neg_select)

    return {
        'image': image,
        'H_boxes': Human_augmented,
        'Hsp_boxes': Human_augmented_sp,
        'O_boxes': Object_augmented,
        'gt_class_sp': action_sp,
        'gt_class_HO': action_HO,
        'gt_class_H': action_H,
        'Mask_sp': mask_sp,
        'Mask_HO': mask_HO,
        'Mask_H': mask_H,
        'sp': Pattern,
        'H_num': len(action_H),
    }
示例6: align_model
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def align_model(self):
    """Rotate the Blender model into the original model's frame, translate it onto
    the original's centroid, and write both point sets to text files under DATA_DIR."""
    blender_model = self.load_ply_model(self.blender_model_path)
    orig_model = self.load_orig_model()
    # Apply the rotation first, then shift so both clouds share a centroid.
    rotated = np.dot(blender_model, self.rotation_transform.T)
    offset = np.mean(orig_model, axis=0) - np.mean(rotated, axis=0)
    aligned = rotated + offset
    np.savetxt(os.path.join(cfg.DATA_DIR, 'blender_model.txt'), aligned)
    np.savetxt(os.path.join(cfg.DATA_DIR, 'orig_model.txt'), orig_model)
示例7: __init__
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def __init__(self, class_type):
    """Set up per-class paths for masks, raw data, and cached pose arrays.

    :param class_type: LINEMOD object class name (e.g. 'cat')
    """
    self.class_type = class_type
    self.mask_path = os.path.join(cfg.LINEMOD, '{}/mask/*.png'.format(class_type))
    self.dir_path = os.path.join(cfg.LINEMOD_ORIG, '{}/data'.format(class_type))

    # Use os.makedirs(exist_ok=True) instead of shelling out to `mkdir -p`:
    # no subprocess, portable, and errors surface as exceptions.
    dataset_pose_dir_path = os.path.join(cfg.DATA_DIR, 'dataset_poses')
    os.makedirs(dataset_pose_dir_path, exist_ok=True)
    self.dataset_poses_path = os.path.join(dataset_pose_dir_path, '{}_poses.npy'.format(class_type))

    blender_pose_dir_path = os.path.join(cfg.DATA_DIR, 'blender_poses')
    # The original issued this mkdir twice; once is enough.
    os.makedirs(blender_pose_dir_path, exist_ok=True)
    self.blender_poses_path = os.path.join(blender_pose_dir_path, '{}_poses.npy'.format(class_type))

    self.pose_transformer = PoseTransformer(class_type)
示例8: parse_arg
# 需要導入模塊: from config import cfg [as 別名]
# 或者: from config.cfg import DATA_DIR [as 別名]
def parse_arg():
    """Parse command-line arguments for CapsNet evaluation.

    Requires --mode to be one of 'reconstruct', 'cap_tweak' or 'adversarial';
    otherwise prints help and exits.

    Returns:
        argparse.Namespace with fields data_dir, ckpt, mode, batch_size,
        max_iters, tweak_target, fig_dir, lr.
    """
    parser = argparse.ArgumentParser(description="Train CapsNet")
    parser.add_argument('--data_dir', dest='data_dir',
                        type=str, default=cfg.DATA_DIR,
                        help='Directory for storing input data')
    parser.add_argument('--ckpt', dest='ckpt',
                        type=str, default=cfg.TRAIN_DIR,
                        help='path to the directory of check point')
    parser.add_argument('--mode', dest='mode',
                        type=str, default=None,
                        help='evaluation mode: reconstruct, cap_tweak, adversarial')
    parser.add_argument('--batch_size', dest='batch_size', type=int,
                        default=30, help='batch size for reconstruct evaluation')
    # Help text fixed: it was a copy-paste of the batch_size description.
    parser.add_argument('--max_iters', dest='max_iters', type=int,
                        default=50, help='max number of evaluation iterations')
    parser.add_argument('--tweak_target', dest='tweak_target', type=int,
                        default=None, help='target number for capsule tweaking experiment')
    parser.add_argument('--fig_dir', dest='fig_dir', type=str,
                        default='../figs', help='directory to save figures')
    parser.add_argument('--lr', dest='lr', type=float,
                        default=1, help='learning rate of adversarial test')
    args = parser.parse_args()
    # A valid --mode is mandatory; bail out with usage info otherwise.
    if len(sys.argv) == 1 or \
            args.mode not in \
            ('reconstruct', 'cap_tweak', 'adversarial'):
        parser.print_help()
        sys.exit(1)
    return args