

Python cudnn.enabled Code Examples

This article collects typical usage examples of torch.backends.cudnn.enabled in Python. If you are unsure how the cudnn.enabled flag is used in practice, the selected examples below should help. You can also explore further usage examples from the torch.backends.cudnn module.


The following shows 15 code examples of cudnn.enabled, sorted by popularity by default.
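Before looking at the individual examples, here is a minimal sketch of the pattern most of them share. It relies only on the public PyTorch API; the helper name setup_cudnn, its parameters, and the default seed are illustrative and do not come from any of the projects listed below. cudnn.enabled switches the cuDNN backend on or off, cudnn.benchmark lets cuDNN autotune convolution algorithms when input shapes are fixed, and cudnn.deterministic restricts cuDNN to reproducible algorithms at some cost in speed.

import random

import numpy as np
import torch
from torch.backends import cudnn


def setup_cudnn(seed=0, reproducible=False):
    """Seed the RNGs and configure the cuDNN backend; returns the device string."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    if not torch.cuda.is_available():
        return 'cpu'

    torch.cuda.manual_seed_all(seed)
    cudnn.enabled = True              # use cuDNN kernels for convolutions/RNNs
    if reproducible:
        cudnn.benchmark = False       # no autotuning: algorithm choice stays fixed
        cudnn.deterministic = True    # force deterministic cuDNN algorithms
    else:
        cudnn.benchmark = True        # autotune kernels for fixed input sizes
    return 'cuda'

A typical call would be device = setup_cudnn(seed=args.seed, reproducible=True) before the model is built, which mirrors the seed-then-configure-cuDNN ordering used in the examples below.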

Example 1: initial_setup

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def initial_setup(self, args):
        ############
        logging.info(f"run pid: {os.getpid()} parent: {os.getppid()}")
        logging.info("#########")
        logging.info(args.__dict__)
        logging.info(f"Rank: {args.rank} World_size: {args.world_size}, Run {args.run_name}")

        args.cuda = torch.cuda.is_available()
        logging.info(f"Pytorch version: {torch.__version__}")
        logging.info("Using CUDA: {} CUDA AVAIL: {} #DEVICES: {} VERSION: {}".format(
            args.cuda, torch.cuda.is_available(), torch.cuda.device_count(),
            torch.version.cuda))
        if not args.cuda:
            self.device = 'cpu'
        else:
            self.device = 'cuda'
            cudnn.benchmark = True
            cudnn.enabled = True

        random.seed(args.seed) # The seed needs to be constant between processes.
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed) 
Developer: facebookresearch, Project: fastMRI, Lines of code: 24, Source: base_trainer.py

Example 2: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main(args):
    cudnn.benchmark = True
    cudnn.enabled = True
    
    save_path = args.logs_dir
    sys.stdout = Logger(osp.join(args.logs_dir, 'log'+ str(args.merge_percent)+ time.strftime(".%m_%d_%H:%M:%S") + '.txt'))

    # get all unlabeled data for training
    dataset_all = datasets.create(args.dataset, osp.join(args.data_dir, args.dataset))
    new_train_data, cluster_id_labels = change_to_unlabel(dataset_all)

    num_train_ids = len(np.unique(np.array(cluster_id_labels)))
    nums_to_merge = int(num_train_ids * args.merge_percent)

    BuMain = Bottom_up(model_name=args.arch, batch_size=args.batch_size, 
            num_classes=num_train_ids,
            dataset=dataset_all,
            u_data=new_train_data, save_path=args.logs_dir, max_frames=args.max_frames,
            embeding_fea_size=args.fea)


    for step in range(int(1/args.merge_percent)-1):
        print('step: ',step)

        BuMain.train(new_train_data, step, loss=args.loss) 

        
        BuMain.evaluate(dataset_all.query, dataset_all.gallery)

        # get new train data for the next iteration
        print('----------------------------------------bottom-up clustering------------------------------------------------')
        cluster_id_labels, new_train_data = BuMain.get_new_train_data_v2(cluster_id_labels, nums_to_merge, step, penalty=args.size_penalty)
        print('\n\n') 
Developer: gddingcs, Project: Dispersion-based-Clustering, Lines of code: 35, Source: run.py

Example 3: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    # Parse the JSON arguments
    config_args = parse_args()

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(
        config_args.experiment_dir)

    model = MobileNetV2(config_args)

    if config_args.cuda:
        model.cuda()
        cudnn.enabled = True
        cudnn.benchmark = True

    print("Loading Data...")
    data = CIFAR10Data(config_args)
    print("Data loaded successfully\n")

    trainer = Train(model, data.trainloader, data.testloader, config_args)

    if config_args.to_train:
        try:
            print("Training...")
            trainer.train()
            print("Training Finished\n")
        except KeyboardInterrupt:
            pass

    if config_args.to_test:
        print("Testing...")
        trainer.test(data.testloader)
        print("Testing Finished\n") 
Developer: MG2033, Project: MobileNet-V2, Lines of code: 35, Source: main.py

Example 4: __init__

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.writer = SummaryWriter(args.log_dir)
        cudnn.enabled = True

        # set up model
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = get_aux_net(args.network.arch)(aux_classes=args.aux_classes + 1, classes=args.n_classes)
        self.model = self.model.to(self.device)

        if args.mode == 'train':
            # set up optimizer, lr scheduler and loss functions
            optimizer = get_optimizer(self.args.training.optimizer)
            optimizer_params = {k: v for k, v in self.args.training.optimizer.items() if k != "name"}
            self.optimizer = optimizer(self.model.parameters(), **optimizer_params)
            self.scheduler = get_scheduler(self.optimizer, self.args.training.lr_scheduler)

            self.class_loss_func = nn.CrossEntropyLoss()

            self.start_iter = 0

            # resume
            if args.training.resume:
                self.load(args.model_dir + '/' + args.training.resume)

            cudnn.benchmark = True

        elif args.mode == 'val':
            self.load(os.path.join(args.model_dir, args.validation.model))
        else:
            self.load(os.path.join(args.model_dir, args.testing.model)) 
Developer: Jiaolong, Project: self-supervised-da, Lines of code: 34, Source: aux_model.py

Example 5: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True
    
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 34, Source: train_imagenet.py

Example 6: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    build_fn = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_fn(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 36, Source: test_cifar.py

Example 7: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True
    
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    build_fn = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_fn(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: renqianluo, Project: NAO_pytorch, Lines of code: 34, Source: train_cifar.py

Example 8: run

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def run(args):
    cudnn.benchmark = True
    cudnn.enabled = True
    if args.mode == 'train':
        trainer = create_trainer(args)
        model = VariationalNetworkModel(args)
        trainer.fit(model)
    else:  # args.mode == 'test' or args.mode == 'challenge'
        assert args.checkpoint is not None
        model = VariationalNetworkModel.load_from_checkpoint(
            str(args.checkpoint))
        model.hparams = args
        model.hparams.sample_rate = 1.
        trainer = create_trainer(args)
        trainer.test(model) 
Developer: facebookresearch, Project: fastMRI, Lines of code: 17, Source: varnet.py

Example 9: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  criterion = nn.CrossEntropyLoss()
  criterion = criterion.cuda()

  _, test_transform = utils._data_transforms_cifar10(args)
  test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

  test_queue = torch.utils.data.DataLoader(
      test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

  model.drop_path_prob = args.drop_path_prob
  test_acc, test_obj = infer(test_queue, model, criterion)
  logging.info('test_acc %f', test_acc) 
Developer: kcyu2014, Project: eval-nas, Lines of code: 35, Source: test.py

Example 10: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: kcyu2014, Project: eval-nas, Lines of code: 35, Source: train_imagenet.py

Example 11: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    build_fn = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_fn(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: kcyu2014, Project: eval-nas, Lines of code: 35, Source: train_cifar.py

Example 12: _init_device

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def _init_device(self):
        if not torch.cuda.is_available():
            self.logger.info('no gpu device available')
            sys.exit(1)
        self.current_gpu, _ = dutils.get_gpus_memory_info()
        np.random.seed(self.args.seed)
        torch.cuda.set_device(self.current_gpu)
        cudnn.benchmark = True
        torch.manual_seed(self.args.seed)
        cudnn.enabled = True
        torch.cuda.manual_seed(self.args.seed)
        self.logger.info('gpu device = %d', self.current_gpu) 
Developer: antoyang, Project: NAS-Benchmark, Lines of code: 14, Source: train_search.py

Example 13: _init_random_and_device

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def _init_random_and_device(self):
        # Set random seed and cuda device
        np.random.seed(self.args.seed)
        cudnn.benchmark = True
        torch.manual_seed(self.args.seed)
        cudnn.enabled = True
        torch.cuda.manual_seed(self.args.seed)
        max_free_gpu_id, gpus_info = dutils.get_gpus_memory_info()
        self.device_id = max_free_gpu_id
        self.gpus_info = gpus_info
        self.device = torch.device('cuda:{}'.format(0 if self.args.multi_gpus else self.device_id)) 
Developer: antoyang, Project: NAS-Benchmark, Lines of code: 13, Source: train_cnn.py

Example 14: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.cuda.manual_seed(args.seed)
    
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)

        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Developer: antoyang, Project: NAS-Benchmark, Lines of code: 34, Source: train_imagenet.py

Example 15: main

# Required import: from torch.backends import cudnn [as alias]
# Or: from torch.backends.cudnn import enabled [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled=True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)

    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    utils.load(model, args.model_path)

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()

    _, test_transform = utils.data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)

    test_queue = torch.utils.data.DataLoader(
            test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)

    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc) 
Developer: antoyang, Project: NAS-Benchmark, Lines of code: 35, Source: test.py


Note: The torch.backends.cudnn.enabled examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or reusing the code; do not reproduce this article without permission.