当前位置: 首页>>代码示例>>Python>>正文


Python optimizers.get_optimizer方法代码示例

本文整理汇总了Python中optimizers.get_optimizer方法的典型用法代码示例。如果您正苦于以下问题:Python optimizers.get_optimizer方法的具体用法?Python optimizers.get_optimizer怎么用?Python optimizers.get_optimizer使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在optimizers的用法示例。


在下文中一共展示了optimizers.get_optimizer方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: run

# 需要导入模块: import optimizers [as 别名]
# 或者: from optimizers import get_optimizer [as 别名]
def run(config):
    """Build the model, loss, and optimizer from `config`, resume from the
    latest checkpoint when one exists, and launch training.

    Args:
        config: experiment configuration object; `config.train.dir` is used
            as the TensorBoard log directory.
    """
    train_dir = config.train.dir

    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())

    # Resume training state if a checkpoint is available; otherwise start
    # from scratch (epoch/step of -1 means "nothing trained yet").
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is None:
        last_epoch, step = -1, -1
    else:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            model, optimizer, checkpoint)

    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    # One dataloader per split, each with its split-specific transform.
    dataloaders = {}
    for split in ['train', 'val']:
        dataloaders[split] = get_dataloader(
            config, split, get_transform(config, split))

    writer = SummaryWriter(config.train.dir)
    train(config, model, dataloaders, criterion, optimizer, scheduler,
          writer, last_epoch + 1)
开发者ID:pudae,项目名称:kaggle-hpa,代码行数:24,代码来源:train.py

示例2: run

# 需要导入模块: import optimizers [as 别名]
# 或者: from optimizers import get_optimizer [as 别名]
def run(config):
    """Build the task and optimizer from `config`, resume from the latest
    checkpoint when one exists, and launch training.

    Args:
        config: experiment configuration object; `config.train.dir` is used
            as the TensorBoard log directory.
    """
    train_dir = config.train.dir

    task = get_task(config)
    optimizer = get_optimizer(config, task.get_model().parameters())

    # Resume training state if a checkpoint is available; otherwise start
    # from scratch (epoch/step of -1 means "nothing trained yet").
    checkpoint = utils.checkpoint.get_initial_checkpoint(config)
    if checkpoint is None:
        last_epoch, step = -1, -1
    else:
        last_epoch, step = utils.checkpoint.load_checkpoint(
            task.get_model(), optimizer, checkpoint)

    print('from checkpoint: {} last epoch:{}'.format(checkpoint, last_epoch))
    scheduler = get_scheduler(config, optimizer, last_epoch)

    # Each split's transform is built with the task's preprocessing options.
    preprocess_opt = task.get_preprocess_opt()
    dataloaders = {}
    for split in ['train', 'dev']:
        transform = get_transform(config, split, **preprocess_opt)
        dataloaders[split] = get_dataloader(config, split, transform)

    writer = SummaryWriter(config.train.dir)
    train(config, task, dataloaders, optimizer, scheduler,
          writer, last_epoch + 1)
开发者ID:pudae,项目名称:kaggle-humpback,代码行数:28,代码来源:train.py

示例3: __init__

# 需要导入模块: import optimizers [as 别名]
# 或者: from optimizers import get_optimizer [as 别名]
def __init__(self, args, logger):
    """Set up the model on the available device and, depending on
    `args.mode`, either prepare training state (optimizer, scheduler,
    loss) or load a saved checkpoint for evaluation/testing.

    Args:
        args: configuration namespace (network, training, validation,
            testing sections are read here).
        logger: logger instance stored for later use.
    """
    self.args = args
    self.logger = logger
    self.writer = SummaryWriter(args.log_dir)
    cudnn.enabled = True

    # Place the model on GPU when available, CPU otherwise.
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net_cls = get_aux_net(args.network.arch)
    # +1 on aux_classes — presumably reserves an extra auxiliary label; TODO confirm with caller.
    model = net_cls(aux_classes=args.aux_classes + 1, classes=args.n_classes)
    self.model = model.to(self.device)

    if args.mode == 'train':
        # Optimizer class comes from the config name; the remaining config
        # entries (everything except "name") are its constructor kwargs.
        opt_cls = get_optimizer(self.args.training.optimizer)
        opt_kwargs = {key: val
                      for key, val in self.args.training.optimizer.items()
                      if key != "name"}
        self.optimizer = opt_cls(self.model.parameters(), **opt_kwargs)
        self.scheduler = get_scheduler(self.optimizer, self.args.training.lr_scheduler)

        self.class_loss_func = nn.CrossEntropyLoss()

        self.start_iter = 0

        # Optionally resume training from a saved checkpoint.
        if args.training.resume:
            self.load(args.model_dir + '/' + args.training.resume)

        cudnn.benchmark = True

    elif args.mode == 'val':
        self.load(os.path.join(args.model_dir, args.validation.model))
    else:
        self.load(os.path.join(args.model_dir, args.testing.model))
开发者ID:Jiaolong,项目名称:self-supervised-da,代码行数:34,代码来源:aux_model.py

示例4: search_once

# 需要导入模块: import optimizers [as 别名]
# 或者: from optimizers import get_optimizer [as 别名]
def search_once(config, policy):
    """Run one training pass with the given augmentation `policy` applied
    to the training split, and return the resulting 'f1_mavg' score.

    Args:
        config: experiment configuration object.
        policy: augmentation policy forwarded to the train transform.

    Returns:
        The 'f1_mavg' entry of the score dict produced by `train`.
    """
    model = get_model(config).cuda()
    criterion = get_loss(config)
    optimizer = get_optimizer(config, model.parameters())
    scheduler = get_scheduler(config, optimizer, -1)

    # Only the train split uses the candidate policy; val keeps defaults.
    train_transform = get_transform(config, 'train', params={'policies': policy})
    val_transform = get_transform(config, 'val')
    dataloaders = {
        'train': get_dataloader(config, 'train', train_transform),
        'val': get_dataloader(config, 'val', val_transform),
    }

    score_dict = train(config, model, dataloaders, criterion, optimizer,
                       scheduler, None, 0)
    return score_dict['f1_mavg']
开发者ID:pudae,项目名称:kaggle-hpa,代码行数:15,代码来源:augmentation_search.py


注:本文中的optimizers.get_optimizer方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。