

Python torch.optim Code Examples

This article collects typical usage examples of torch.optim in Python (strictly speaking, torch.optim is a module, not a method). If you are wondering what torch.optim does, how to use it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the enclosing torch package.


The following 15 code examples of torch.optim are shown below, sorted by popularity by default.
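
Before the examples, here is a minimal, self-contained sketch of the core torch.optim workflow they all build on: construct an optimizer over a model's parameters, then call zero_grad, backward, and step on every iteration. The model and data here are toy placeholders, for illustration only:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)                        # toy model
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
loss_fn = nn.MSELoss()

x, y = torch.randn(16, 4), torch.randn(16, 1)  # toy data
for _ in range(100):
    optimizer.zero_grad()        # clear gradients accumulated by backward()
    loss = loss_fn(model(x), y)
    loss.backward()              # populate .grad on each parameter
    optimizer.step()             # update parameters in place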

Example 1: construct_graph

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default')
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
        if value.requires_grad:
            if 'bias' in key:
                # biases may get a doubled learning rate and, optionally, no weight decay
                params += [{'params': [value],
                            'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1),
                            'weight_decay': cfg.TRAIN.WEIGHT_DECAY if cfg.TRAIN.BIAS_DECAY else 0}]
            else:
                params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    # self.valwriter = tb.writer.FileWriter(self.tbvaldir)

    return lr, self.optimizer
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 24, Source: train_val.py
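
The per-parameter dictionaries built above are a standard torch.optim feature: each group can override defaults such as lr and weight_decay, and a group-level lr satisfies SGD's required lr argument. A minimal sketch with toy values (not taken from the project config):

import torch.nn as nn
import torch.optim as optim

net = nn.Linear(4, 2)
biases = [p for n, p in net.named_parameters() if 'bias' in n]
weights = [p for n, p in net.named_parameters() if 'bias' not in n]
optimizer = optim.SGD(
    [{'params': weights, 'lr': 0.001, 'weight_decay': 5e-4},
     {'params': biases, 'lr': 0.002, 'weight_decay': 0}],  # e.g. doubled bias lr, no bias decay
    momentum=0.9)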

Example 2: restore

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def restore(self, modules, ckpt_p, strict=True, restore_restart=False):
    print('Restoring {}... (strict={})'.format(ckpt_p, strict))
    map_location = None if pe.CUDA_AVAILABLE else 'cpu'
    state_dicts = torch.load(ckpt_p, map_location=map_location)
    # ---
    for key, m in modules.items():
        # Optimizer implements its own load_state_dict, which does not accept the `strict` keyword
        if isinstance(m, optimizer.Optimizer):
            if restore_restart:
                print('Not restoring optimizer, --restore_restart given...')
            else:
                try:
                    m.load_state_dict(state_dicts[key])
                except ValueError as e:
                    raise ValueError('Error while restoring Optimizer:', str(e))
        else:
            try:
                m.load_state_dict(state_dicts[key], strict=strict)
            except RuntimeError as e:  # loading error
                # dump the module tree to help diagnose the mismatch
                for n, module in sorted(m.named_modules()):
                    print(n, module)
                raise e
    return self.get_itr_from_ckpt_p(ckpt_p)
Author: fab-jul, Project: L3C-PyTorch, Lines: 25, Source: saver.py
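
The strict keyword exists only on nn.Module.load_state_dict, which is why the snippet above special-cases Optimizer. A minimal sketch of checkpointing both a model and its optimizer (paths and dictionary keys are illustrative):

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# save both state dicts in one file
torch.save({'model': model.state_dict(), 'optim': optimizer.state_dict()}, 'ckpt.pt')

# restore; map_location='cpu' lets a GPU checkpoint load on a CPU-only machine
state_dicts = torch.load('ckpt.pt', map_location='cpu')
model.load_state_dict(state_dicts['model'], strict=True)
optimizer.load_state_dict(state_dicts['optim'])  # no strict= keyword here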

Example 3: fit

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def fit(self, observations, labels):
    def closure():
        predicted = self.predict(observations)
        loss = self.loss_fn(predicted, labels)
        self.optimizer.zero_grad()
        loss.backward()
        return loss
    old_params = parameters_to_vector(self.model.parameters())
    # retry with successively halved learning rates if LBFGS diverges
    for lr in self.lr * .5 ** np.arange(10):
        self.optimizer = optim.LBFGS(self.model.parameters(), lr=lr)
        self.optimizer.step(closure)
        current_params = parameters_to_vector(self.model.parameters())
        if any(np.isnan(current_params.data.cpu().numpy())):
            print("LBFGS optimization diverged. Rolling back update...")
            vector_to_parameters(old_params, self.model.parameters())
        else:
            return
Author: mjacar, Project: pytorch-trpo, Lines: 19, Source: torch_utils.py
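
Unlike first-order optimizers, optim.LBFGS may re-evaluate the objective several times per step, so step() takes a closure that recomputes the loss. A minimal stand-alone sketch with toy data:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)
x, y = torch.randn(32, 4), torch.randn(32, 1)
optimizer = optim.LBFGS(model.parameters(), lr=0.1)

def closure():
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

optimizer.step(closure)  # LBFGS calls closure() as many times as it needs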

Example 4: main

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # the 10 CIFAR-10 class labels
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 38, Source: train.py
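
The train function called above is defined elsewhere in the project; a minimal sketch of what such a loop plausibly looks like (a hypothetical implementation, not the project's actual code):

def train(net, criterion, optimizer, train_loader, device):
    # hypothetical sketch of the training loop invoked above
    net.train()
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(net(inputs), targets)
        loss.backward()
        optimizer.step()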

Example 5: imitating

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def imitating(self, epoch):
        """
        train the user simulator by simple imitation learning (behavioral cloning)
        """
        self.user.train()
        a_loss, t_loss = 0., 0.
        data_train_iter = batch_iter(self.data_train[0], self.data_train[1], self.data_train[2], self.data_train[3])
        for i, data in enumerate(data_train_iter):
            self.optim.zero_grad()
            loss_a, loss_t = self.user_loop(data)
            a_loss += loss_a.item()
            t_loss += loss_t.item()
            loss = loss_a + loss_t
            loss.backward()
            self.optim.step()
            
            if (i+1) % self.print_per_batch == 0:
                a_loss /= self.print_per_batch
                t_loss /= self.print_per_batch
                logging.debug('<<user simulator>> epoch {}, iter {}, loss_a:{}, loss_t:{}'.format(epoch, i, a_loss, t_loss))
                a_loss, t_loss = 0., 0.
        
        if (epoch+1) % self.save_per_epoch == 0:
            self.save(self.save_dir, epoch)
        self.user.eval() 
Author: ConvLab, Project: ConvLab, Lines: 27, Source: user.py

Example 6: save_algorithm

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def save_algorithm(algorithm, ckpt=None):
    '''Save all the nets for an algorithm'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    model_prepath = agent.spec['meta']['model_prepath']
    if ckpt is not None:
        model_prepath = f'{model_prepath}_ckpt-{ckpt}'
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        save(net, model_path)
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            save(optim, optim_path)
    logger.debug(f'Saved algorithm {util.get_class_name(algorithm)} nets {net_names} to {model_prepath}_*.pt') 
Author: ConvLab, Project: ConvLab, Lines: 19, Source: net_util.py

Example 7: load_algorithm

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def load_algorithm(algorithm):
    '''Load all the nets for an algorithm'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    if util.in_eval_lab_modes():
        # load specific model in eval mode
        model_prepath = agent.spec['meta']['eval_model_prepath']
    else:
        model_prepath = agent.spec['meta']['model_prepath']
    logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names} from {model_prepath}_*.pt')
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        load(net, model_path)
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            load(optim, optim_path) 
Author: ConvLab, Project: ConvLab, Lines: 21, Source: net_util.py
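
The save and load helpers used in Examples 6 and 7 are project utilities whose bodies are not shown here; under the usual PyTorch conventions they presumably wrap torch.save / torch.load on state dicts, roughly like this hypothetical sketch:

import torch

def save(net_or_optim, path):
    # hypothetical: persist only the state dict, not the whole object
    torch.save(net_or_optim.state_dict(), path)

def load(net_or_optim, path):
    # hypothetical: load_state_dict exists on both nn.Module and Optimizer instances
    net_or_optim.load_state_dict(torch.load(path))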

Example 8: get_optimizer

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def get_optimizer(gradient_model, config):
    if config['support'].get('flexible_step', False):
        stop_parameters = list(filter(lambda p: p.requires_grad, gradient_model.stop_gate.parameters()))
    else:
        stop_parameters = []
    init_parameters = list(filter(lambda p: p.requires_grad, gradient_model.model.parameters()))
    update_parameters = list(filter(lambda p: p.requires_grad, gradient_model.meta_lstms.parameters()))
    parameters = [
        {'params': init_parameters, 'lr': config['lr']['init_lr']},
        {'params': update_parameters, 'lr': config['lr']['update_lr']},
        {'params': stop_parameters, 'lr': config['lr']['stop_lr']}
    ]
    optimizer = optim.Adam(parameters, **config['optim'])
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2, patience=2,
                                                           verbose=True, min_lr=1e-6)
    return optimizer, scheduler 
Author: THUDM, Project: ScenarioMeta, Lines: 18, Source: main.py
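
ReduceLROnPlateau is the one scheduler whose step() takes the monitored metric; with mode='max' as above, the learning rate is cut by factor once the metric stops improving for patience epochs. A minimal usage sketch:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='max', factor=0.2, patience=2, min_lr=1e-6)

for epoch in range(10):
    val_accuracy = 0.5            # placeholder for the real validation metric
    scheduler.step(val_accuracy)  # pass the metric being monitored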

Example 9: train_network

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def train_network(start_epoch, epochs, optim, model, train_loader, val_loader, criterion, mixup, device, dtype,
                  batch_size, log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5, best_test, local_rank,
                  child):
    my_range = range if child else trange
    for epoch in my_range(start_epoch, epochs + 1):
        train_loss, train_accuracy1, train_accuracy5 = train(model, train_loader, mixup, epoch, optim, criterion,
                                                             device, dtype, batch_size, log_interval, child)
        test_loss, test_accuracy1, test_accuracy5 = test(model, val_loader, criterion, device, dtype, child)
        optim.epoch_step()  # epoch_step is provided by this project's optimizer wrapper, not by torch.optim

        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - test_accuracy1, 'val_error5': 1 - test_accuracy5,
                          'val_loss': test_loss, 'train_error1': 1 - train_accuracy1,
                          'train_error5': 1 - train_accuracy5, 'train_loss': train_loss})
        save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_test,
                         'optimizer': optim.state_dict()}, test_accuracy1 > best_test, filepath=save_path,
                        local_rank=local_rank)
        # TODO: save on the end of the cycle

        csv_logger.plot_progress(claimed_acc1=claimed_acc1, claimed_acc5=claimed_acc5)

        if test_accuracy1 > best_test:
            best_test = test_accuracy1

    csv_logger.write_text('Best accuracy is {:.2f}% top-1'.format(best_test * 100.)) 
Author: Randl, Project: MobileNetV3-pytorch, Lines: 26, Source: imagenet.py

Example 10: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, args):
        self.args = args
        # ------------------------------------------------Dataset---------------------------------------------- #
        self.data = BenchmarkDataset(root=args.dataset_path, npoints=args.point_num, uniform=True, class_choice=args.class_choice)
        self.dataLoader = torch.utils.data.DataLoader(self.data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
        print("Training Dataset : {} prepared.".format(len(self.data)))
        # ----------------------------------------------------------------------------------------------------- #

        # -------------------------------------------------Module---------------------------------------------- #
        self.G = Generator(batch_size=args.batch_size, features=args.G_FEAT, degrees=args.DEGREE, support=args.support).to(args.device)
        self.D = Discriminator(batch_size=args.batch_size, features=args.D_FEAT).to(args.device)             
        
        self.optimizerG = optim.Adam(self.G.parameters(), lr=args.lr, betas=(0, 0.99))
        self.optimizerD = optim.Adam(self.D.parameters(), lr=args.lr, betas=(0, 0.99))

        self.GP = GradientPenalty(args.lambdaGP, gamma=1, device=args.device)
        print("Network prepared.")
        # ----------------------------------------------------------------------------------------------------- #

        # ---------------------------------------------Visualization------------------------------------------- #
        self.vis = visdom.Visdom(port=args.visdom_port)
        assert self.vis.check_connection()
        print("Visdom connected.")
        # ----------------------------------------------------------------------------------------------------- # 
Author: seowok, Project: TreeGAN, Lines: 26, Source: train.py
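
With two optimizers as above, each network runs its own zero_grad / backward / step cycle; detaching the generator output keeps the discriminator step from backpropagating into G. A sketch with stand-in modules and simplified WGAN-style losses (the project's actual losses, given the GradientPenalty above, are more involved):

import torch
import torch.nn as nn
import torch.optim as optim

G = nn.Linear(8, 8)   # stand-ins for the Generator / Discriminator above
D = nn.Linear(8, 1)
optG = optim.Adam(G.parameters(), lr=1e-4, betas=(0, 0.99))
optD = optim.Adam(D.parameters(), lr=1e-4, betas=(0, 0.99))

z, real = torch.randn(4, 8), torch.randn(4, 8)
# discriminator step: score real high, fake low (detach so G gets no gradient)
optD.zero_grad()
d_loss = D(G(z).detach()).mean() - D(real).mean()
d_loss.backward()
optD.step()
# generator step: make fakes score high
optG.zero_grad()
g_loss = -D(G(z)).mean()
g_loss.backward()
optG.step()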

Example 11: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, config, net):
        self.log_dir = config.log_dir
        self.model_dir = config.model_dir
        self.net = net
        self.clock = TrainClock()
        self.device = config.device

        self.use_triplet = config.use_triplet
        self.use_footvel_loss = config.use_footvel_loss

        # set loss function
        self.mse = nn.MSELoss()
        self.tripletloss = nn.TripletMarginLoss(margin=config.triplet_margin)
        self.triplet_weight = config.triplet_weight
        self.foot_idx = config.foot_idx
        self.footvel_loss_weight = config.footvel_loss_weight

        # set optimizer
        self.optimizer = optim.Adam(self.net.parameters(), config.lr)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, 0.99) 
Author: ChrisWu1997, Project: 2D-Motion-Retargeting, Lines: 22, Source: base_agent.py
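
ExponentialLR multiplies every parameter group's learning rate by gamma (0.99 above) each time step() is called, typically once per epoch. A minimal sketch:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)

for epoch in range(5):
    # ... one epoch of training would go here ...
    optimizer.step()   # step the optimizer before the scheduler
    scheduler.step()   # lr <- lr * 0.99
    print(scheduler.get_last_lr())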

Example 12: get_network

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def get_network(net_type, params, train=True):
    net_params = params[net_type]
    net = net_params['network'](net_params['input_channel'],
                                net_params['channels'],
                                net_params['output_channel'])

    if params['GPU']:
        net.cuda()

    if train:
        net.train()
        optimizer = optim.Adam(net.parameters(),
                               lr=params['lr'],
                               betas=(params['beta1'], params['beta2']))
    else:
        net.eval()
        net.load_state_dict(torch.load(net_params['model_path']))
        optimizer = None
    return net, optimizer 
Author: cmu-mlsp, Project: reconstructing_faces_from_voices, Lines: 21, Source: network.py

Example 13: adversarial

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def adversarial(args):
    train_adversarial(sess_path=args.session_path,
                      batch_data_generator=args.batch_generator(args),
                      model_load_path=args.generator_load_path,
                      discriminator_load_path=args.discriminator_load_path,
                      model_optimizer_class=getattr(optim, args.generator_optimizer),
                      discriminator_optimizer_class=getattr(optim, args.discriminator_optimizer),
                      model_gradient_clipping=args.generator_gradient_clipping,
                      discriminator_gradient_clipping=args.discriminator_gradient_clipping,
                      model_learning_rate=args.generator_learning_rate,
                      discriminator_learning_rate=args.discriminator_learning_rate,
                      reset_model_optimizer=args.reset_generator_optimizer,
                      reset_discriminator_optimizer=args.reset_discriminator_optimizer,
                      g_max_q_mean=args.g_max_q_mean,
                      g_min_q_mean=args.g_min_q_mean,
                      d_min_loss=args.d_min_loss,
                      g_max_steps=args.g_max_steps,
                      d_max_steps=args.d_max_steps,
                      mc_sample_size=args.monte_carlo_sample_size,
                      mc_sample_factor=args.monte_carlo_sample_factor,
                      first_to_train=args.first_to_train,
                      control_ratio=args.control_ratio,
                      save_interval=args.save_interval,
                      enable_logging=args.enable_logging) 
Author: djosix, Project: Performance-RNN-PyTorch, Lines: 26, Source: adversarial.py
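
Because every optimizer is a class exported by torch.optim, a command-line string can be turned into an optimizer class with getattr, as done above. A minimal sketch:

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)
optimizer_name = 'Adam'                           # e.g. parsed from argparse
optimizer_class = getattr(optim, optimizer_name)  # resolves to optim.Adam
optimizer = optimizer_class(model.parameters(), lr=1e-3)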

Example 14: loadOptimizer

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def loadOptimizer(self, network, config_dict):
        params_all_id = list(map(id, network.parameters()))
        params_posenet_id = list(map(id, network.to_pose.parameters()))
        params_toOptimize = [p for p in network.parameters() if id(p) in params_posenet_id]

        params_static_id = [id_p for id_p in params_all_id if id_p not in params_posenet_id]

        # disable gradient computation for static params, saves memory and computation
        for p in network.parameters():
            if id(p) in params_static_id:
                p.requires_grad = False

        print("Normal learning rate: {} params".format(len(params_posenet_id)))
        print("Static learning rate: {} params".format(len(params_static_id)))
        print("Total: {} params".format(len(params_all_id)))

        opt_params = [{'params': params_toOptimize, 'lr': config_dict['learning_rate']}]
        optimizer = torch.optim.Adam(opt_params, lr=config_dict['learning_rate'])  # weight_decay=0.0005
        return optimizer 
Author: hrhodin, Project: UnsupervisedGeometryAwareRepresentationLearning, Lines: 21, Source: train_encodeDecode_pose.py
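
Setting requires_grad = False as above both freezes a parameter and lets you hand the optimizer only the trainable subset. A compact sketch of the same pattern:

import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 2))
for p in model[0].parameters():   # freeze the first layer
    p.requires_grad = False
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.Adam(trainable, lr=1e-3)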

Example 15: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, state_dim, action_dim, max_action):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)
        self.replay_buffer = Replay_buffer()
        self.writer = SummaryWriter(directory)

        self.num_critic_update_iteration = 0
        self.num_actor_update_iteration = 0
        self.num_training = 0 
Author: sweetice, Project: Deep-reinforcement-learning-with-pytorch, Lines: 18, Source: DDPG.py
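
The target networks initialized above are conventionally tracked by Polyak averaging rather than retrained. That update is not shown in this snippet, but a standard sketch looks like the following (tau is a hypothetical averaging coefficient):

def soft_update(target, source, tau=0.005):
    # target <- tau * source + (1 - tau) * target, parameter by parameter
    for t_param, s_param in zip(target.parameters(), source.parameters()):
        t_param.data.copy_(tau * s_param.data + (1 - tau) * t_param.data)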


Note: The torch.optim examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.