

Python torch.optim Code Examples

This article collects typical code examples of the torch.optim module in Python. If you are unsure how torch.optim is used in practice, or what it is used for, the examples selected below may help. You can also explore further usage examples from the torch package.


A total of 15 code examples using torch.optim are shown below, sorted by popularity by default.
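
As a quick orientation before the examples, here is a minimal sketch of the typical torch.optim workflow (the model, data, and hyperparameters are illustrative placeholders, not taken from the projects below): build an optimizer over the model's parameters, then repeatedly clear gradients, backpropagate, and step.

import torch
import torch.nn as nn
import torch.optim as optim

# Illustrative model and random data; names and sizes are placeholders.
model = nn.Linear(10, 2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

inputs = torch.randn(8, 10)
targets = torch.randint(0, 2, (8,))

for step in range(5):
    optimizer.zero_grad()   # clear gradients accumulated in the previous step
    loss = criterion(model(inputs), targets)
    loss.backward()         # compute gradients
    optimizer.step()        # update parameters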

Example 1: construct_graph

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def construct_graph(self):
    # Set the random seed
    torch.manual_seed(cfg.RNG_SEED)
    # Build the main computation graph
    self.net.create_architecture(self.imdb.num_classes, tag='default')
    # Define the loss
    # loss = layers['total_loss']
    # Set learning rate and momentum
    lr = cfg.TRAIN.LEARNING_RATE
    params = []
    for key, value in dict(self.net.named_parameters()).items():
      if value.requires_grad:
        if 'bias' in key:
          params += [{'params':[value],'lr':lr*(cfg.TRAIN.DOUBLE_BIAS + 1), 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
        else:
          params += [{'params':[value],'lr':lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
    self.optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
    # Write the train and validation information to tensorboard
    self.writer = tb.writer.FileWriter(self.tbdir)
    # self.valwriter = tb.writer.FileWriter(self.tbvaldir)

    return lr, self.optimizer 
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 24, Source: train_val.py

Example 2: restore

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def restore(self, modules, ckpt_p, strict=True, restore_restart=False):
        print('Restoring {}... (strict={})'.format(ckpt_p, strict))
        map_location = None if pe.CUDA_AVAILABLE else 'cpu'
        state_dicts = torch.load(ckpt_p, map_location=map_location)
        # ---
        for key, m in modules.items():
            # optim implements its own load_state_dict which does not have the `strict` keyword...
            if isinstance(m, optimizer.Optimizer):
                if restore_restart:
                    print('Not restoring optimizer, --restore_restart given...')
                else:
                    try:
                        m.load_state_dict(state_dicts[key])
                    except ValueError as e:
                        raise ValueError('Error while restoring Optimizer:', str(e))
            else:
                try:
                    m.load_state_dict(state_dicts[key], strict=strict)
                except RuntimeError as e:  # loading error
                    for n, module in sorted(m.named_modules()):
                        print(n, module)
                    raise e
        return self.get_itr_from_ckpt_p(ckpt_p) 
Author: fab-jul, Project: L3C-PyTorch, Lines: 25, Source: saver.py

Example 3: fit

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def fit(self, observations, labels):
    def closure():
      predicted = self.predict(observations)
      loss = self.loss_fn(predicted, labels)
      self.optimizer.zero_grad()
      loss.backward()
      return loss
    old_params = parameters_to_vector(self.model.parameters())
    for lr in self.lr * .5**np.arange(10):
      self.optimizer = optim.LBFGS(self.model.parameters(), lr=lr)
      self.optimizer.step(closure)
      current_params = parameters_to_vector(self.model.parameters())
      if any(np.isnan(current_params.data.cpu().numpy())):
        print("LBFGS optimization diverged. Rolling back update...")
        vector_to_parameters(old_params, self.model.parameters())
      else:
        return 
Author: mjacar, Project: pytorch-trpo, Lines: 19, Source: torch_utils.py

Example 4: main

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def main():
    best_acc = 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    print('==> Preparing data..')
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])

    dataset_train = CIFAR10(root='../data', train=True, download=True, 
                            transform=transforms_train)

    train_loader = DataLoader(dataset_train, batch_size=args.batch_size, 
                              shuffle=True, num_workers=args.num_worker)

    # there are 10 classes so the dataset name is cifar-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 
               'dog', 'frog', 'horse', 'ship', 'truck')

    print('==> Making model..')

    net = pyramidnet()
    net = nn.DataParallel(net)
    net = net.to(device)
    num_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
    # optimizer = optim.SGD(net.parameters(), lr=args.lr, 
    #                       momentum=0.9, weight_decay=1e-4)
    
    train(net, criterion, optimizer, train_loader, device) 
Author: dnddnjs, Project: pytorch-multigpu, Lines: 38, Source: train.py

Example 5: imitating

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def imitating(self, epoch):
        """
        train the user simulator by simple imitation learning (behavioral cloning)
        """
        self.user.train()
        a_loss, t_loss = 0., 0.
        data_train_iter = batch_iter(self.data_train[0], self.data_train[1], self.data_train[2], self.data_train[3])
        for i, data in enumerate(data_train_iter):
            self.optim.zero_grad()
            loss_a, loss_t = self.user_loop(data)
            a_loss += loss_a.item()
            t_loss += loss_t.item()
            loss = loss_a + loss_t
            loss.backward()
            self.optim.step()
            
            if (i+1) % self.print_per_batch == 0:
                a_loss /= self.print_per_batch
                t_loss /= self.print_per_batch
                logging.debug('<<user simulator>> epoch {}, iter {}, loss_a:{}, loss_t:{}'.format(epoch, i, a_loss, t_loss))
                a_loss, t_loss = 0., 0.
        
        if (epoch+1) % self.save_per_epoch == 0:
            self.save(self.save_dir, epoch)
        self.user.eval() 
Author: ConvLab, Project: ConvLab, Lines: 27, Source: user.py

Example 6: save_algorithm

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def save_algorithm(algorithm, ckpt=None):
    '''Save all the nets for an algorithm'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    model_prepath = agent.spec['meta']['model_prepath']
    if ckpt is not None:
        model_prepath = f'{model_prepath}_ckpt-{ckpt}'
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        save(net, model_path)
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            save(optim, optim_path)
    logger.debug(f'Saved algorithm {util.get_class_name(algorithm)} nets {net_names} to {model_prepath}_*.pt') 
Author: ConvLab, Project: ConvLab, Lines: 19, Source: net_util.py

Example 7: load_algorithm

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def load_algorithm(algorithm):
    '''Load all the nets for an algorithm'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    if util.in_eval_lab_modes():
        # load specific model in eval mode
        model_prepath = agent.spec['meta']['eval_model_prepath']
    else:
        model_prepath = agent.spec['meta']['model_prepath']
    logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names} from {model_prepath}_*.pt')
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        load(net, model_path)
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            load(optim, optim_path) 
Author: ConvLab, Project: ConvLab, Lines: 21, Source: net_util.py

Example 8: get_optimizer

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def get_optimizer(gradient_model, config):
    if config['support'].get('flexible_step', False):
        stop_parameters = list(filter(lambda p: p.requires_grad, gradient_model.stop_gate.parameters()))
    else:
        stop_parameters = []
    init_parameters = list(filter(lambda p: p.requires_grad, gradient_model.model.parameters()))
    update_parameters = list(filter(lambda p: p.requires_grad, gradient_model.meta_lstms.parameters()))
    parameters = [
        {'params': init_parameters, 'lr': config['lr']['init_lr']},
        {'params': update_parameters, 'lr': config['lr']['update_lr']},
        {'params': stop_parameters, 'lr': config['lr']['stop_lr']}
    ]
    optimizer = optim.Adam(parameters, **config['optim'])
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.2, patience=2,
                                                           verbose=True, min_lr=1e-6)
    return optimizer, scheduler 
Author: THUDM, Project: ScenarioMeta, Lines: 18, Source: main.py

Example 9: train_network

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def train_network(start_epoch, epochs, optim, model, train_loader, val_loader, criterion, mixup, device, dtype,
                  batch_size, log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5, best_test, local_rank,
                  child):
    my_range = range if child else trange
    for epoch in my_range(start_epoch, epochs + 1):
        train_loss, train_accuracy1, train_accuracy5, = train(model, train_loader, mixup, epoch, optim, criterion,
                                                              device, dtype, batch_size, log_interval, child)
        test_loss, test_accuracy1, test_accuracy5 = test(model, val_loader, criterion, device, dtype, child)
        optim.epoch_step()

        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - test_accuracy1, 'val_error5': 1 - test_accuracy5,
                          'val_loss': test_loss, 'train_error1': 1 - train_accuracy1,
                          'train_error5': 1 - train_accuracy5, 'train_loss': train_loss})
        save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_test,
                         'optimizer': optim.state_dict()}, test_accuracy1 > best_test, filepath=save_path,
                        local_rank=local_rank)
        # TODO: save on the end of the cycle

        csv_logger.plot_progress(claimed_acc1=claimed_acc1, claimed_acc5=claimed_acc5)

        if test_accuracy1 > best_test:
            best_test = test_accuracy1

    csv_logger.write_text('Best accuracy is {:.2f}% top-1'.format(best_test * 100.)) 
Author: Randl, Project: MobileNetV3-pytorch, Lines: 26, Source: imagenet.py

Example 10: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, args):
        self.args = args
        # ------------------------------------------------Dataset---------------------------------------------- #
        self.data = BenchmarkDataset(root=args.dataset_path, npoints=args.point_num, uniform=True, class_choice=args.class_choice)
        self.dataLoader = torch.utils.data.DataLoader(self.data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
        print("Training Dataset : {} prepared.".format(len(self.data)))
        # ----------------------------------------------------------------------------------------------------- #

        # -------------------------------------------------Module---------------------------------------------- #
        self.G = Generator(batch_size=args.batch_size, features=args.G_FEAT, degrees=args.DEGREE, support=args.support).to(args.device)
        self.D = Discriminator(batch_size=args.batch_size, features=args.D_FEAT).to(args.device)             
        
        self.optimizerG = optim.Adam(self.G.parameters(), lr=args.lr, betas=(0, 0.99))
        self.optimizerD = optim.Adam(self.D.parameters(), lr=args.lr, betas=(0, 0.99))

        self.GP = GradientPenalty(args.lambdaGP, gamma=1, device=args.device)
        print("Network prepared.")
        # ----------------------------------------------------------------------------------------------------- #

        # ---------------------------------------------Visualization------------------------------------------- #
        self.vis = visdom.Visdom(port=args.visdom_port)
        assert self.vis.check_connection()
        print("Visdom connected.")
        # ----------------------------------------------------------------------------------------------------- # 
Author: seowok, Project: TreeGAN, Lines: 26, Source: train.py

Example 11: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, config, net):
        self.log_dir = config.log_dir
        self.model_dir = config.model_dir
        self.net = net
        self.clock = TrainClock()
        self.device = config.device

        self.use_triplet = config.use_triplet
        self.use_footvel_loss = config.use_footvel_loss

        # set loss function
        self.mse = nn.MSELoss()
        self.tripletloss = nn.TripletMarginLoss(margin=config.triplet_margin)
        self.triplet_weight = config.triplet_weight
        self.foot_idx = config.foot_idx
        self.footvel_loss_weight = config.footvel_loss_weight

        # set optimizer
        self.optimizer = optim.Adam(self.net.parameters(), config.lr)
        self.scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, 0.99) 
Author: ChrisWu1997, Project: 2D-Motion-Retargeting, Lines: 22, Source: base_agent.py

Example 12: get_network

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def get_network(net_type, params, train=True):
    net_params = params[net_type]
    net = net_params['network'](net_params['input_channel'],
                                net_params['channels'],
                                net_params['output_channel'])

    if params['GPU']:
        net.cuda()

    if train:
        net.train()
        optimizer = optim.Adam(net.parameters(),
                               lr=params['lr'],
                               betas=(params['beta1'], params['beta2']))
    else:
        net.eval()
        net.load_state_dict(torch.load(net_params['model_path']))
        optimizer = None
    return net, optimizer 
Author: cmu-mlsp, Project: reconstructing_faces_from_voices, Lines: 21, Source: network.py

Example 13: adversarial

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def adversarial(args):
    train_adversarial(sess_path=args.session_path,
                      batch_data_generator=args.batch_generator(args),
                      model_load_path=args.generator_load_path,
                      discriminator_load_path=args.discriminator_load_path,
                      model_optimizer_class=getattr(optim, args.generator_optimizer),
                      discriminator_optimizer_class=getattr(optim, args.discriminator_optimizer),
                      model_gradient_clipping=args.generator_gradient_clipping,
                      discriminator_gradient_clipping=args.discriminator_gradient_clipping,
                      model_learning_rate=args.generator_learning_rate,
                      discriminator_learning_rate=args.discriminator_learning_rate,
                      reset_model_optimizer=args.reset_generator_optimizer,
                      reset_discriminator_optimizer=args.reset_discriminator_optimizer,
                      g_max_q_mean=args.g_max_q_mean,
                      g_min_q_mean=args.g_min_q_mean,
                      d_min_loss=args.d_min_loss,
                      g_max_steps=args.g_max_steps,
                      d_max_steps=args.d_max_steps,
                      mc_sample_size=args.monte_carlo_sample_size,
                      mc_sample_factor=args.monte_carlo_sample_factor,
                      first_to_train=args.first_to_train,
                      control_ratio=args.control_ratio,
                      save_interval=args.save_interval,
                      enable_logging=args.enable_logging) 
Author: djosix, Project: Performance-RNN-PyTorch, Lines: 26, Source: adversarial.py

Example 14: loadOptimizer

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def loadOptimizer(self, network, config_dict):
        params_all_id = list(map(id, network.parameters()))
        params_posenet_id = list(map(id, network.to_pose.parameters()))
        params_toOptimize = [p for p in network.parameters() if id(p) in params_posenet_id]

        params_static_id = [id_p for id_p in params_all_id if not id_p in params_posenet_id]

        # disable gradient computation for static params, saves memory and computation
        for p in network.parameters():
            if id(p) in params_static_id:
                p.requires_grad = False

        print("Normal learning rate: {} params".format(len(params_posenet_id)))
        print("Static learning rate: {} params".format(len(params_static_id)))
        print("Total: {} params".format(len(params_all_id)))

        opt_params = [{'params': params_toOptimize, 'lr': config_dict['learning_rate']}]
        optimizer = torch.optim.Adam(opt_params, lr=config_dict['learning_rate']) #weight_decay=0.0005
        return optimizer 
Author: hrhodin, Project: UnsupervisedGeometryAwareRepresentationLearning, Lines: 21, Source: train_encodeDecode_pose.py

Example 15: __init__

# Required import: import torch [as alias]
# Or: from torch import optim [as alias]
def __init__(self, state_dim, action_dim, max_action):
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=1e-4)

        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = Critic(state_dim, action_dim).to(device)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=1e-3)
        self.replay_buffer = Replay_buffer()
        self.writer = SummaryWriter(directory)

        self.num_critic_update_iteration = 0
        self.num_actor_update_iteration = 0
        self.num_training = 0 
Author: sweetice, Project: Deep-reinforcement-learning-with-pytorch, Lines: 18, Source: DDPG.py


Note: the torch.optim examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.