This article collects typical usage examples of the config.cfg.freeze method in Python. If you are wondering what cfg.freeze does in practice, or how to call it, the curated code examples below should help. You can also explore further usage of the containing class, config.cfg.
Four code examples of the cfg.freeze method are shown below, sorted by popularity by default.
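Across all four examples, cfg exposes merge_from_file, merge_from_list, freeze and defrost, which matches the CfgNode API from the yacs library; treating it as yacs is an inference from those method names rather than something stated on this page. Under that assumption, here is a minimal, self-contained sketch of the merge-then-freeze pattern (the config keys are illustrative):

# A minimal sketch of the merge-then-freeze pattern (assumes `pip install yacs`;
# keys are illustrative, not taken from the repositories below).
from yacs.config import CfgNode as CN

_C = CN()
_C.OUTPUT_DIR = "./log"
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"

cfg = _C.clone()                              # working copy of the defaults
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # command-line style override
cfg.freeze()                                  # config is now read-only

try:
    cfg.MODEL.DEVICE = "cuda"                 # any later write fails fast
except AttributeError as err:
    print("config is immutable:", err)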
Example 1: main
# Required import: from config import cfg [as alias]
# Or: from config.cfg import freeze [as alias]
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Merge file and command-line overrides, then freeze the config
    # so nothing downstream can mutate it.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)
    inference(cfg, model, val_loader, num_query)
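The opts argument collected with nargs=argparse.REMAINDER arrives as a flat list of alternating KEY VALUE tokens, which is exactly what merge_from_list consumes before freeze() locks the result. A runnable sketch of that hand-off, assuming a yacs CfgNode and illustrative keys:

# Sketch: how REMAINDER opts reach merge_from_list (assumes yacs; keys illustrative).
import argparse
from yacs.config import CfgNode as CN

cfg = CN()
cfg.OUTPUT_DIR = "./log"
cfg.TEST = CN()
cfg.TEST.WEIGHT = ""

parser = argparse.ArgumentParser()
parser.add_argument("opts", default=None, nargs=argparse.REMAINDER)
args = parser.parse_args(["OUTPUT_DIR", "./run1", "TEST.WEIGHT", "./model.pth"])

cfg.merge_from_list(args.opts)  # alternating KEY, VALUE tokens
cfg.freeze()
print(cfg.OUTPUT_DIR, cfg.TEST.WEIGHT)  # -> ./run1 ./model.pth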
Example 2: train
# Required import: from config import cfg [as alias]
# Or: from config.cfg import freeze [as alias]
def train(args):
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    shutil.copy(args.config_file, cfg.OUTPUT_DIR)

    num_gpus = torch.cuda.device_count()

    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)
    model = build_model(cfg, num_classes)
    loss_func = make_loss(cfg, num_classes)

    trainer = BaseTrainer(cfg, model, train_dl, val_dl,
                          loss_func, num_query, num_gpus)
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
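Because train() calls cfg.freeze() before the epoch loop, no trainer or callback code can silently change a hyperparameter mid-run. A small sketch of that guarantee, again assuming a yacs CfgNode with an illustrative learning-rate key:

# Sketch: freeze() before the training loop locks hyperparameters for the run
# (assumes yacs; the solver key is illustrative).
from yacs.config import CfgNode as CN

cfg = CN()
cfg.SOLVER = CN()
cfg.SOLVER.BASE_LR = 3.5e-4

cfg.freeze()
assert cfg.is_frozen()          # the trainer can rely on a stable config

try:
    cfg.SOLVER.BASE_LR = 1e-3   # e.g. a buggy scheduler writing back
except AttributeError:
    print("hyperparameters are locked for the whole run")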
Example 3: main
# Required import: from config import cfg [as alias]
# Or: from config.cfg import freeze [as alias]
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="./configs/tiger.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    parser.add_argument("--index_flod", help="Index of k-fold", default=3, type=int)  # k-fold split to train on
    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATASETS.INDEX_FLOD = args.index_flod  # programmatic override; must precede freeze()
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train(cfg)
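The notable line here is the programmatic override of cfg.DATASETS.INDEX_FLOD between merge_from_list and freeze(): any run-specific value has to be written in that window. A sketch of the ordering, assuming yacs and mirroring the key name (including its original spelling):

# Sketch: per-run overrides must land before freeze() (assumes yacs;
# key name mirrors the snippet above, original spelling kept).
from yacs.config import CfgNode as CN

cfg = CN()
cfg.DATASETS = CN()
cfg.DATASETS.INDEX_FLOD = 0              # default fold index

for fold in range(5):                    # e.g. 5-fold cross-validation
    run_cfg = cfg.clone()                # fresh mutable copy per fold
    run_cfg.DATASETS.INDEX_FLOD = fold   # write while still mutable
    run_cfg.freeze()
    print("training fold", run_cfg.DATASETS.INDEX_FLOD)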
Example 4: main
# Required import: from config import cfg [as alias]
# Or: from config.cfg import freeze [as alias]
def main():
    args = parse_args()
    update_config(cfg, args)

    # update_config typically leaves the config frozen, so thaw it just long
    # enough to record this process's rank, then freeze it again.
    cfg.defrost()
    cfg.RANK = args.rank
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train'
    )

    logger.info(pprint.pformat(args))
    logger.info(cfg)

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or cfg.MULTIPROCESSING_DISTRIBUTED

    ngpus_per_node = torch.cuda.device_count()
    if cfg.MULTIPROCESSING_DISTRIBUTED:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly.
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes:
        # one main_worker process per GPU.
        mp.spawn(
            main_worker,
            nprocs=ngpus_per_node,
            args=(ngpus_per_node, args, final_output_dir, tb_log_dir)
        )
    else:
        # Single-process path: call main_worker directly.
        main_worker(
            ','.join([str(i) for i in cfg.GPUS]),
            ngpus_per_node,
            args,
            final_output_dir,
            tb_log_dir
        )
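This example shows the complement of freeze(): defrost() reopens an already-frozen config just long enough to inject a late value, here the process rank, before freezing again. A sketch of that round-trip, assuming yacs and an illustrative RANK key:

# Sketch: defrost()/freeze() round-trip for a late, per-process value
# (assumes yacs; RANK is illustrative).
from yacs.config import CfgNode as CN

cfg = CN()
cfg.RANK = 0
cfg.freeze()    # frozen, e.g. by an update_config-style helper

cfg.defrost()   # briefly mutable again
cfg.RANK = 3    # e.g. the rank assigned to this process
cfg.freeze()    # immutable for the rest of the run

assert cfg.is_frozen() and cfg.RANK == 3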