

Python optim.Adadelta Method Code Examples

This article collects typical usage examples of the torch.optim.Adadelta method in Python. If you are wondering what optim.Adadelta does, how to call it, or what it looks like in real projects, the curated code samples below may help. You can also explore further usage examples for the containing module, torch.optim.


Fifteen code examples of the optim.Adadelta method are shown below, sorted by popularity by default.
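Before the project examples, here is a minimal, self-contained sketch of typical optim.Adadelta usage: construct the optimizer over the model's parameters, then call zero_grad/backward/step in the training loop. The toy model, random data, and step count are illustrative assumptions only; lr=1.0, rho=0.9, and eps=1e-6 shown here are the PyTorch defaults.

import torch
from torch import nn, optim

# Toy model and data, purely for illustration.
model = nn.Linear(10, 1)
criterion = nn.MSELoss()
x = torch.randn(32, 10)
y = torch.randn(32, 1)

# Adadelta adapts per-parameter step sizes from running averages of squared
# gradients and squared updates; lr acts as a global scale on each update.
optimizer = optim.Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-6)

for step in range(100):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()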

Example 1: get_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def get_optimizer(model):
    parameters = _get_paramters(model)
    opt_lower = cfg.SOLVER.OPTIMIZER.lower()

    if opt_lower == 'sgd':
        optimizer = optim.SGD(
            parameters, lr=cfg.SOLVER.LR, momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(
            parameters, lr=cfg.SOLVER.LR, eps=cfg.SOLVER.EPSILON, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(
            parameters, lr=cfg.SOLVER.LR, alpha=0.9, eps=cfg.SOLVER.EPSILON,
            momentum=cfg.SOLVER.MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received "
                         "{}".format(opt_lower))

    return optimizer 
Author: LikeLy-Journey | Project: SegmenTron | Lines: 24 | Source file: optimizer.py

Example 2: set_parameters

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_parameters(self, params):
        self.params = [p for p in params if p.requires_grad]
        if self.method == 'sgd':
            self.optimizer = optim.SGD(self.params, lr=self.lr)
        elif self.method == 'adagrad':
            self.optimizer = optim.Adagrad(self.params, lr=self.lr)
            for group in self.optimizer.param_groups:
                for p in group['params']:
                    self.optimizer.state[p]['sum'] = self.optimizer\
                        .state[p]['sum'].fill_(self.adagrad_accum)
        elif self.method == 'adadelta':
            self.optimizer = optim.Adadelta(self.params, lr=self.lr)
        elif self.method == 'adam':
            self.optimizer = optim.Adam(self.params, lr=self.lr,
                                        betas=self.betas, eps=1e-9)
        else:
            raise RuntimeError("Invalid optim method: " + self.method) 
Author: matthewmackay | Project: reversible-rnn | Lines: 19 | Source file: Optim.py

Example 3: set_train_model

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_train_model(self, model):
		print("Initializing training model...")
		self.model = model
		self.trainModel = self.model(config = self)
		if self.pretrain_model != None:
			self.trainModel.load_state_dict(torch.load(self.pretrain_model))
		self.trainModel.cuda()
		if self.optimizer != None:
			pass
		elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
			self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr = self.learning_rate, lr_decay = self.lr_decay, weight_decay = self.weight_decay)
		elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
			self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		elif self.opt_method == "Adam" or self.opt_method == "adam":
			self.optimizer = optim.Adam(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		else:
			self.optimizer = optim.SGD(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)
		print("Finish initializing") 
Author: ShulinCao | Project: OpenNRE-PyTorch | Lines: 20 | Source file: Config.py

Example 4: create_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def create_optimizer(model, new_lr):
    # setup optimizer
    if args.optimizer == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=new_lr,
                                momentum=0.9,
                                weight_decay= 5e-4)
    elif args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                                lr = args.lr,                            
                                weight_decay=5e-4)
    elif args.optimizer == 'adadelta':
        optimizer = optim.Adadelta(model.parameters(),
                                lr = args.lr,      
                                rho = 0.95,
                                eps = 1e-06)
    return optimizer 
Author: tjddus9597 | Project: Beyond-Binary-Supervision-CVPR19 | Lines: 18 | Source file: main.py

Example 5: set_parameters

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_parameters(self, params):
        """ ? """
        self.params = []
        self.sparse_params = []
        for k, p in params:
            if p.requires_grad:
                if self.method != 'sparseadam' or "embed" not in k:
                    self.params.append(p)
                else:
                    self.sparse_params.append(p)
        if self.method == 'sgd':
            self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
        elif self.method == 'adagrad':
            self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
            for group in self.optimizer.param_groups:
                for p in group['params']:
                    self.optimizer.state[p]['sum'] = self.optimizer\
                        .state[p]['sum'].fill_(self.adagrad_accum)
        elif self.method == 'adadelta':
            self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
        elif self.method == 'adam':
            self.optimizer = optim.Adam(self.params, lr=self.learning_rate,
                                        betas=self.betas, eps=1e-9)
        else:
            raise RuntimeError("Invalid optim method: " + self.method) 
Author: nlpyang | Project: PreSumm | Lines: 27 | Source file: optimizers.py

Example 6: get_optim

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def get_optim(lr):
    # Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
    # stabilize the models.
    fc_params = [p for n,p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
    non_fc_params = [p for n,p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
    params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
    # params = [p for n,p in detector.named_parameters() if p.requires_grad]

    if conf.adam:
        optimizer = optim.Adadelta(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
    else:
        optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)

    #scheduler = StepLR(optimizer, step_size=1, gamma=0.5)
    scheduler = ReduceLROnPlateau(optimizer, 'max', patience=2, factor=0.5,
                                  verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
    return optimizer, scheduler 
Author: KaihuaTang | Project: VCTree-Scene-Graph-Generation | Lines: 19 | Source file: train_rels.py

Example 7: __define_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def __define_optimizer(self, learning_rate, weight_decay,
                           lr_drop_factor, lr_drop_patience, optimizer='Adam'):
        assert optimizer in ['RMSprop', 'Adam', 'Adadelta', 'SGD']

        parameters = ifilter(lambda p: p.requires_grad,
                             self.model.parameters())

        if optimizer == 'RMSprop':
            self.optimizer = optim.RMSprop(
                parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'Adadelta':
            self.optimizer = optim.Adadelta(
                parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'Adam':
            self.optimizer = optim.Adam(
                parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'SGD':
            self.optimizer = optim.SGD(
                parameters, lr=learning_rate, momentum=0.9,
                weight_decay=weight_decay)

        self.lr_scheduler = ReduceLROnPlateau(
            self.optimizer, mode='min', factor=lr_drop_factor,
            patience=lr_drop_patience, verbose=True) 
Author: Wizaron | Project: instance-segmentation-pytorch | Lines: 26 | Source file: model.py

Example 8: set_model

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_model(self, model):
        self.model = model
        self.trainModel = self.model(config=self)
        self.trainModel.cuda()
        if self.optimizer is not None:
            pass
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr=self.alpha,
                                           lr_decay=self.lr_decay, weight_decay=self.weight_decay)
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(
                self.trainModel.parameters(), lr=self.alpha)
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(
                self.trainModel.parameters(), lr=self.alpha)
        else:
            self.optimizer = optim.SGD(
                self.trainModel.parameters(), lr=self.alpha) 
Author: BUPTDM | Project: OpenHINE | Lines: 20 | Source file: RHINE.py

Example 9: create_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def create_optimizer(parameters, opt):
    lr = opt.learning_rate
    # default learning rates:
    # sgd - 0.5, adagrad - 0.01, adadelta - 1, adam - 0.001, adamax - 0.002, asgd - 0.01, rmsprop - 0.01, rprop - 0.01
    optim_method = opt.optim_method.casefold()
    if optim_method == 'sgd':
        optimizer = optim.SGD(parameters, lr=lr if lr else 0.5, weight_decay=opt.weight_decay)
    elif optim_method == 'adagrad':
        optimizer = optim.Adagrad(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'adadelta':
        optimizer = optim.Adadelta(parameters, lr=lr if lr else 1, weight_decay=opt.weight_decay)
    elif optim_method == 'adam':
        optimizer = optim.Adam(parameters, lr=lr if lr else 0.001, weight_decay=opt.weight_decay)
    elif optim_method == 'adamax':
        optimizer = optim.Adamax(parameters, lr=lr if lr else 0.002, weight_decay=opt.weight_decay)
    elif optim_method == 'asgd':
        optimizer = optim.ASGD(parameters, lr=lr if lr else 0.01, t0=5000, weight_decay=opt.weight_decay)
    elif optim_method == 'rmsprop':
        optimizer = optim.RMSprop(parameters, lr=lr if lr else 0.01, weight_decay=opt.weight_decay)
    elif optim_method == 'rprop':
        optimizer = optim.Rprop(parameters, lr=lr if lr else 0.01)
    else:
        raise RuntimeError("Invalid optim method: " + opt.optim_method)
    return optimizer 
Author: RemiLeblond | Project: SeaRNN-open | Lines: 26 | Source file: optimization.py

Example 10: set_model

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_model(self, model):
        self.model = model
        self.trainModel = self.model(config=self)

        if self.use_cuda:
            self.trainModel = self.trainModel.cuda()
        if self.optimizer is not None:
            pass
        elif self.opt_method == "Adagrad" or self.opt_method == "adagrad":
            self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr=self.alpha,lr_decay=self.lr_decay,weight_decay=self.weight_decay)
        elif self.opt_method == "Adadelta" or self.opt_method == "adadelta":
            self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr=self.alpha)
        elif self.opt_method == "Adam" or self.opt_method == "adam":
            self.optimizer = optim.Adam(self.trainModel.parameters(), lr=self.alpha)
        else:
            self.optimizer = optim.SGD(self.trainModel.parameters(), lr=self.alpha) 
Author: YueLiu | Project: NeuralTripleTranslation | Lines: 18 | Source file: Config.py

Example 11: make_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer 
Author: siat-nlp | Project: MAMS-for-ABSA | Lines: 22 | Source file: make_optimizer.py

Example 12: apply_update

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def apply_update(self, weights, gradient):
        """Update the running averages of gradients and weight updates,
            and compute the Adadelta update for this step."""
        if self.running_g2 is None:
            self.running_g2 = [ np.zeros_like(g) for g in gradient ]
        if self.running_dx2 is None:
            self.running_dx2 = [ np.zeros_like(g) for g in gradient ]

        self.running_g2 = self.running_average_square( self.running_g2, gradient )
        new_weights = []
        updates = []
        for w, g, g2, dx2 in zip(weights, gradient, self.running_g2, self.running_dx2):
            update = np.multiply( np.divide( self.sqrt_plus_epsilon(dx2), self.sqrt_plus_epsilon(g2) ), g )
            new_weights.append( np.subtract( w, update ) )
            updates.append(update)
        self.running_dx2 = self.running_average_square( self.running_dx2, updates )
        return new_weights 
Author: vlimant | Project: mpi_learn | Lines: 19 | Source file: optimizer.py
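For reference, Example 12's numpy implementation follows the standard Adadelta update rule (Zeiler, 2012). Assuming its helpers running_average_square and sqrt_plus_epsilon compute the ρ-weighted moving average of squares and √(·+ε) respectively, each step corresponds to:

\begin{align*}
E[g^2]_t &= \rho\,E[g^2]_{t-1} + (1-\rho)\,g_t^2 \\
\Delta x_t &= -\frac{\sqrt{E[\Delta x^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}}\, g_t \\
E[\Delta x^2]_t &= \rho\,E[\Delta x^2]_{t-1} + (1-\rho)\,\Delta x_t^2 \\
x_{t+1} &= x_t + \Delta x_t
\end{align*}

In the code, running_g2 and running_dx2 track E[g^2] and E[\Delta x^2], and new_weights subtracts the computed update from each weight, matching the negative sign of \Delta x_t above.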

Example 13: build_torch

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def build_torch(self, model):
        import torch
        lookup = {
            'sgd':      torch.optim.SGD,
            'adadelta': torch.optim.Adadelta,
            'rmsprop':  torch.optim.RMSprop,
            'adam':     torch.optim.Adam
            }
        if self.name not in lookup:
            logging.warning("No optimizer '{}' found, using SGD instead".format(self.name))
            self.name = 'sgd'
        opt = lookup[self.name](model.parameters(), **self.config)
        if self.horovod_wrapper:
            import horovod.torch as hvd
            opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
        return opt 
Author: vlimant | Project: mpi_learn | Lines: 18 | Source file: optimizer.py

Example 14: set_parameters

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def set_parameters(self, params):
        """ ? """
        self.params = []
        self.sparse_params = []
        for k, p in params:
            if p.requires_grad:
                if self.method != 'sparseadam' or "embed" not in k:
                    self.params.append(p)
                else:
                    self.sparse_params.append(p)
        if self.method == 'sgd':
            self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
        elif self.method == 'adagrad':
            self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
            for group in self.optimizer.param_groups:
                for p in group['params']:
                    self.optimizer.state[p]['sum'] = self.optimizer\
                        .state[p]['sum'].fill_(self.adagrad_accum)
        elif self.method == 'adadelta':
            self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
        elif self.method == 'adam':
            self.optimizer = optim.Adam(self.params, lr=self.learning_rate,
                                        betas=self.betas, eps=self.adam_eps)
        else:
            raise RuntimeError("Invalid optim method: " + self.method) 
Author: HKUST-KnowComp | Project: ASER | Lines: 27 | Source file: optim.py

Example 15: __define_optimizer

# Module to import: from torch import optim [as alias]
# Or: from torch.optim import Adadelta [as alias]
def __define_optimizer(self, learning_rate, weight_decay, lr_drop_factor, lr_drop_patience, rnn_model_num, optimizer='Adam'):
        assert optimizer in ['RMSprop', 'Adam', 'Adadelta', 'SGD']

        for rnn_model_num_counter in range(1, self.moving_horizon + 1):
            if rnn_model_num_counter == rnn_model_num:
                for param in self.model.rnn_models[rnn_model_num_counter - 1].parameters():
                    param.requires_grad = True
            else:
                for param in self.model.rnn_models[rnn_model_num_counter - 1].parameters():
                    param.requires_grad = False

        parameters = ifilter(lambda p: p.requires_grad, self.model.rnn_models[rnn_model_num - 1].parameters())

        if optimizer == 'RMSprop':
            self.optimizer = optim.RMSprop(parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'Adadelta':
            self.optimizer = optim.Adadelta(parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'Adam':
            self.optimizer = optim.Adam(parameters, lr=learning_rate, weight_decay=weight_decay)
        elif optimizer == 'SGD':
            self.optimizer = optim.SGD(parameters, lr=learning_rate, momentum=0.9, weight_decay=weight_decay)

        self.lr_scheduler = ReduceLROnPlateau(self.optimizer, mode='min', factor=lr_drop_factor, patience=lr_drop_patience, verbose=True) 
Author: Wizaron | Project: deep-forecast-pytorch | Lines: 25 | Source file: model.py


Note: The torch.optim.Adadelta examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding projects' licenses. Do not reproduce without permission.