

Python cudnn.deterministic Code Examples

This article collects typical usage examples of torch.backends.cudnn.deterministic in Python. Strictly speaking, deterministic is a module-level flag (an attribute of torch.backends.cudnn) rather than a method; setting it to True makes CuDNN select deterministic algorithms. If you are unsure what cudnn.deterministic does or how to use it in your own code, the curated examples below should help, and you can explore further usage examples for the rest of torch.backends.cudnn.


The sections below present 15 code examples that use cudnn.deterministic, drawn from open-source projects and ordered roughly by popularity.
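Most of the examples follow the same recipe: seed every random number generator in use (Python's random, NumPy, and PyTorch on CPU and GPU) and then enable deterministic CuDNN behavior, typically together with cudnn.benchmark = False so that algorithm auto-tuning does not reintroduce nondeterminism. A minimal, self-contained sketch of that pattern is shown below; the helper name seed_everything and the seed value 42 are illustrative choices, not part of any quoted project.

import random

import numpy as np
import torch
from torch.backends import cudnn

def seed_everything(seed: int = 42) -> None:
    """Seed Python, NumPy and PyTorch RNGs and make CuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # seeds the CPU RNG and, in recent PyTorch, all CUDA devices
    torch.cuda.manual_seed_all(seed)  # explicit per-GPU seeding for clarity on multi-GPU setups
    cudnn.deterministic = True        # force CuDNN to pick deterministic convolution algorithms
    cudnn.benchmark = False           # disable auto-tuning, whose algorithm choice is nondeterministic

seed_everything(42)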

Example 1: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    hvd.init()
    local_rank = hvd.local_rank()
    torch.cuda.set_device(local_rank)

    main_worker(local_rank, 4, args) 
Author: tczhangzhi, Project: pytorch-distributed, Lines: 20, Source: horovod_distributed.py

Example 2: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        # torch.backends.cudnn.enabled = False
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    args.local_rank = int(os.environ["SLURM_PROCID"])
    args.world_size = int(os.environ["SLURM_NPROCS"])
    ngpus_per_node = torch.cuda.device_count()

    job_id = os.environ["SLURM_JOBID"]
    args.dist_url = "file://{}.{}".format(os.path.realpath(args.dist_file), job_id)
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) 
Author: tczhangzhi, Project: pytorch-distributed, Lines: 23, Source: distributed_slurm_main.py

Example 3: init_rand

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def init_rand(seed):
    """
    Initialize all random generators by seed.

    Parameters
    ----------
    seed : int
        Seed value.

    Returns
    -------
    int
        Generated seed value.
    """
    if seed <= 0:
        seed = np.random.randint(10000)
    else:
        cudnn.deterministic = True
        logging.warning(
            "You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down "
            "your training considerably! You may see unexpected behavior when restarting from checkpoints.")
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed 
Author: osmr, Project: imgclsmob, Lines: 27, Source: train_pt.py
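A brief usage note on init_rand above: the helper returns the seed that was actually applied (a freshly drawn one when the argument is non-positive), so logging the return value records which seed produced a given run. A hypothetical call site, assuming the script exposes args.seed:

seed = init_rand(args.seed)           # non-positive seed -> a random one is drawn; positive -> CuDNN made deterministic
logging.info("Random seed: %d", seed)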

Example 4: automated_deep_compression

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def automated_deep_compression(model, criterion, loggers, args):
    import examples.automated_deep_compression.ADC as ADC
    HAVE_COACH_INSTALLED = True
    if not HAVE_COACH_INSTALLED:
        raise ValueError("ADC is currently experimental and uses non-public Coach features")

    if not isinstance(loggers, list):
        loggers = [loggers]

    train_loader, val_loader, test_loader, _ = apputils.load_data(
        args.dataset, os.path.expanduser(args.data), args.batch_size,
        args.workers, args.validation_size, args.deterministic)

    args.display_confusion = True
    validate_fn = partial(validate, val_loader=test_loader, criterion=criterion,
                          loggers=loggers, args=args)

    if args.ADC_params is not None:
        ADC.summarize_experiment(args.ADC_params, args.dataset, args.arch, validate_fn)
        exit()

    save_checkpoint_fn = partial(apputils.save_checkpoint, arch=args.arch, dir=msglogger.logdir)
    ADC.do_adc(model, args.dataset, args.arch, val_loader, validate_fn, save_checkpoint_fn) 
Author: cornell-zhang, Project: dnn-quant-ocs, Lines: 25, Source: compress_classifier.py

Example 5: init

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def init():
    global args, logger, writer
    args = get_parser()
    logger = get_logger()
    writer = SummaryWriter(args.save_path)
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.train_gpu)
    if args.manual_seed is not None:
        cudnn.benchmark = False
        cudnn.deterministic = True
        torch.manual_seed(args.manual_seed)
        np.random.seed(args.manual_seed)
        torch.manual_seed(args.manual_seed)
        torch.cuda.manual_seed_all(args.manual_seed)
    if len(args.train_gpu) == 1:
        args.sync_bn = False
    logger.info(args) 
Author: hszhao, Project: PointWeb, Lines: 18, Source: train.py

Example 6: set_random_seed

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def set_random_seed(random_seed):
        if random_seed is not None:
            print("Set random seed as {}".format(random_seed))
            os.environ['PYTHONHASHSEED'] = str(random_seed)
            random.seed(random_seed)
            np.random.seed(random_seed)
            torch.manual_seed(random_seed)
            torch.cuda.manual_seed_all(random_seed)
            torch.set_num_threads(1)
            cudnn.benchmark = False
            cudnn.deterministic = True
            warnings.warn('You have chosen to seed training. '
                          'This will turn on the CUDNN deterministic setting, '
                          'which can slow down your training considerably! '
                          'You may see unexpected behavior when restarting '
                          'from checkpoints.') 
Author: seanie12, Project: mrqa, Lines: 18, Source: trainer.py

Example 7: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main(args: Namespace) -> None:
    model = ImageNetLightningModel(**vars(args))

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True

    trainer = pl.Trainer(
        default_root_dir=args.save_path,
        gpus=args.gpus,
        max_epochs=args.epochs,
        distributed_backend=args.distributed_backend,
        precision=16 if args.use_16bit else 32,
    )

    if args.evaluate:
        trainer.run_evaluation()
    else:
        trainer.fit(model) 
Author: PyTorchLightning, Project: pytorch-lightning, Lines: 22, Source: imagenet.py
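As a side note, the snippet above targets an older PyTorch Lightning API (trainer.run_evaluation, distributed_backend). In more recent Lightning releases, which is an assumption about your environment rather than part of the quoted project, the same reproducibility setup is usually expressed with pl.seed_everything and the Trainer's deterministic flag, roughly as sketched here (args.seed and args.epochs come from the snippet's own argument parser):

import pytorch_lightning as pl

pl.seed_everything(args.seed)       # seeds Python, NumPy and torch RNGs in one call
trainer = pl.Trainer(
    max_epochs=args.epochs,
    deterministic=True,             # asks backends, including CuDNN, to run deterministically
)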

Example 8: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main(args):
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print('=> load data...')
    corpus = data.Corpus(args.data)

    eval_batch_size = 10
    train_loader = batchify(corpus.train, args.batch_size, device)
    val_loader = batchify(corpus.valid, eval_batch_size, device)

    ntokens = len(corpus.dictionary)
    main_worker(train_loader, val_loader, ntokens, args, device) 
Author: Cerebras, Project: online-normalization, Lines: 24, Source: ptb_main.py

Example 9: prepare_cudnn

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def prepare_cudnn(deterministic: bool = None, benchmark: bool = None) -> None:
    """
    Prepares CuDNN benchmark and sets CuDNN
    to be deterministic/non-deterministic mode

    Args:
        deterministic (bool): deterministic mode if running in CuDNN backend.
        benchmark (bool): If ``True`` use CuDNN heuristics to figure out
            which algorithm will be most performant
            for your model architecture and input.
            Setting it to ``False`` may slow down your training.
    """
    if torch.cuda.is_available():
        # CuDNN reproducibility
        # https://pytorch.org/docs/stable/notes/randomness.html#cudnn
        if deterministic is None:
            deterministic = (
                os.environ.get("CUDNN_DETERMINISTIC", "True") == "True"
            )
        cudnn.deterministic = deterministic

        # https://discuss.pytorch.org/t/how-should-i-disable-using-cudnn-in-my-code/38053/4
        if benchmark is None:
            benchmark = os.environ.get("CUDNN_BENCHMARK", "True") == "True"
        cudnn.benchmark = benchmark 
Author: catalyst-team, Project: catalyst, Lines: 27, Source: torch.py
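For reference, prepare_cudnn above can either be called explicitly at the start of a script or left to read the CUDNN_DETERMINISTIC and CUDNN_BENCHMARK environment variables it checks; a brief usage sketch with illustrative values:

# Explicit, reproducible-but-potentially-slower configuration:
prepare_cudnn(deterministic=True, benchmark=False)

# Equivalent environment-driven configuration (shell; script name is illustrative):
#   CUDNN_DETERMINISTIC=True CUDNN_BENCHMARK=False python train.py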

Example 10: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()
    args.store_name = '_'.join([args.dataset, args.arch, args.loss_type, args.train_rule, args.imb_type, str(args.imb_factor), args.exp_str])
    prepare_folders(args)
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    ngpus_per_node = torch.cuda.device_count()
    main_worker(args.gpu, ngpus_per_node, args) 
Author: kaidic, Project: LDAM-DRW, Lines: 22, Source: cifar_train.py

Example 11: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    gpus = [0, 1, 2, 3]
    main_worker(gpus=gpus, args=args) 
Author: tczhangzhi, Project: pytorch-distributed, Lines: 17, Source: dataparallel.py

Example 12: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    main_worker(args.local_rank, 4, args) 
Author: tczhangzhi, Project: pytorch-distributed, Lines: 16, Source: distributed.py

Example 13: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    args = parser.parse_args()
    if(not os.path.exists(os.path.join(args.save_folder, args.dataset, args.network))):
        os.makedirs(os.path.join(args.save_folder, args.dataset, args.network))
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    os.environ['WORLD_SIZE'] = '2'
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args) 
Author: toandaominh1997, Project: EfficientDet.Pytorch, Lines: 38, Source: train.py

Example 14: init_rand

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def init_rand(seed):
    if seed <= 0:
        seed = np.random.randint(10000)
    else:
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    return seed 
Author: osmr, Project: imgclsmob, Lines: 16, Source: train_pt_cifar-.py

Example 15: main

# Required import: from torch.backends import cudnn [as alias]
# Alternatively: from torch.backends.cudnn import deterministic [as alias]
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    build_fn = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_fn(model_state_dict, optimizer_state_dict, epoch=epoch-1)

    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        is_best = False
        if valid_acc_top1 > best_acc_top1:
            best_acc_top1 = valid_acc_top1
            is_best = True
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best) 
Author: renqianluo, Project: NAO_pytorch, Lines: 36, Source: test_cifar.py


Note: The torch.backends.cudnn.deterministic examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from community open-source projects; copyright remains with the original authors, and redistribution or use should comply with each project's license. Please do not reproduce this article without permission.