

Python adabound.AdaBound Code Examples

This article collects typical usage examples of the adabound.AdaBound method in Python. If you are wondering what adabound.AdaBound does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the adabound package itself.


The following presents 10 code examples of the adabound.AdaBound method, drawn from open-source projects and ordered by popularity.
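Before the examples, here is a minimal sketch of constructing the optimizer directly. The hyperparameter values are illustrative, mirroring the defaults that appear in the examples below rather than recommendations of their own:

# Minimal sketch: wrap a model's parameters with AdaBound and take one step.
import torch
from adabound import AdaBound

model = torch.nn.Linear(10, 2)                      # stand-in model
optimizer = AdaBound(model.parameters(), lr=1e-3, final_lr=0.1, gamma=1e-3,
                     betas=(0.9, 0.999), weight_decay=5e-4)

loss = model(torch.randn(4, 10)).sum()              # dummy forward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()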

Example 1: create_optimizer

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_optimizer(args, optim_params):
    if args.optimizer == 'sgd':
        return optim.SGD(optim_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optimizer == 'adagrad':
        return optim.Adagrad(optim_params, args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optimizer == 'amsgrad':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optimizer == 'adabound':
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optimizer == 'amsbound'
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma, 
                        weight_decay=args.weight_decay, amsbound=True) 
Developer: miraiaroha, Project: ACAN, Lines of code: 25, Source: optimizer.py
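A hedged usage sketch for the function above: the args object is faked here with argparse.Namespace purely for illustration; in the ACAN project these fields come from its own command-line parser.

# Illustrative only: build a stand-in args object with the fields create_optimizer reads.
import torch
import torch.optim as optim     # the snippet's non-AdaBound branches assume this alias
from argparse import Namespace

args = Namespace(optimizer='adabound', lr=1e-3, momentum=0.9, beta1=0.9, beta2=0.999,
                 final_lr=0.1, gamma=1e-3, weight_decay=5e-4)
model = torch.nn.Linear(10, 2)                      # stand-in model
optimizer = create_optimizer(args, model.parameters())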

Example 2: get_optimizer

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def get_optimizer(lr):
    if args.optim=='adam':
        args.clearmomentum=True
        return torch.optim.Adam(model.parameters(),lr=lr)
    elif args.optim=='sgd':
        args.clearmomentum=True
        return torch.optim.SGD(model.parameters(),lr=lr)
    elif args.optim=='sgdm':
        args.clearmomentum=False
        return torch.optim.SGD(model.parameters(),lr=lr,momentum=0.85)
    elif args.optim=='adabound':
        import adabound
        args.clearmomentum=False
        return adabound.AdaBound(model.parameters(),lr=lr)
    return None

########################################################################################################################

# Init 
Developer: joansj, Project: blow, Lines of code: 21, Source: train.py
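A hedged usage sketch: get_optimizer above reads the module-level args and model, so stand-ins for both are created here (the field names match exactly what the function accesses, nothing more).

# Illustrative only: stub the globals that get_optimizer expects.
import torch
from argparse import Namespace

args = Namespace(optim='adabound')
model = torch.nn.Linear(10, 2)                      # stand-in model
optimizer = get_optimizer(lr=1e-3)                  # returns adabound.AdaBound(model.parameters(), lr=1e-3)
print(type(optimizer).__name__, args.clearmomentum) # -> AdaBound False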

Example 3: adjust_learning_rate

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def adjust_learning_rate(lr_scheduler: Union[optim.lr_scheduler.StepLR, optim.lr_scheduler.ReduceLROnPlateau],
                         epoch: int, train_loss: float, dev_f1: float) -> bool:
    if isinstance(lr_scheduler, optim.lr_scheduler.StepLR):
        if isinstance(lr_scheduler.optimizer, AdaBound):
            lr_scheduler.step(epoch=epoch)
            return epoch < 200
        else:
            raise ValueError
    elif isinstance(lr_scheduler, optim.lr_scheduler.ReduceLROnPlateau):
        if isinstance(lr_scheduler.optimizer, optim.SGD):
            lr_scheduler.step(train_loss)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 0.0001
        elif isinstance(lr_scheduler.optimizer, optim.Adam):
            lr_scheduler.step(dev_f1)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 0.00001
        else:
            raise ValueError
    else:
        raise ValueError 
Developer: yahshibu, Project: nested-ner-tacl2020-transformers, Lines of code: 21, Source: utils.py

Example 4: create_opt

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_opt(parameters: Iterator, opt: Optimizer, lr: float = None, l2: float = None, lr_patience: int = None):
    if opt == Optimizer.AdaBound:
        optimizer = AdaBound(parameters, lr=lr if lr is not None else 0.001,
                             weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 150, gamma=0.1)
    elif opt == Optimizer.SGD:
        optimizer = optim.SGD(parameters, lr=lr if lr is not None else 0.1,
                              weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5,
                                                            patience=lr_patience if lr_patience is not None else 5)
    elif opt == Optimizer.Adam:
        optimizer = optim.Adam(parameters, lr=lr if lr is not None else 0.001,
                               weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
                                                            patience=lr_patience if lr_patience is not None else 3)
    else:
        raise ValueError
    return optimizer, lr_scheduler 
Developer: yahshibu, Project: nested-ner-tacl2020-transformers, Lines of code: 20, Source: utils.py
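A hedged sketch of how Example 3 and Example 4 fit together in a training loop. Optimizer.AdaBound refers to the project's own selector enum, and the model and metrics below are placeholders:

# Illustrative only: create the optimizer/scheduler pair, then let the scheduler
# decide when training should stop.
import torch

model = torch.nn.Linear(10, 2)                      # stand-in model
optimizer, lr_scheduler = create_opt(model.parameters(), Optimizer.AdaBound, lr=1e-3, l2=0.)

for epoch in range(5):
    train_loss, dev_f1 = 0.5, 0.7                   # placeholder metrics
    keep_training = adjust_learning_rate(lr_scheduler, epoch, train_loss, dev_f1)
    if not keep_training:
        break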

Example 5: make_optimizer

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer 
Developer: siat-nlp, Project: MAMS-for-ABSA, Lines of code: 22, Source: make_optimizer.py
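A hedged sketch of the nested config layout make_optimizer expects; the mode value and model type name are placeholders, not the project's actual configuration:

# Illustrative only: minimal config dict matching the keys the function reads.
import torch

config = {
    'mode': 'category',
    'aspect_category_model': {
        'type': 'example_model',                    # placeholder model type name
        'example_model': {
            'learning_rate': 1e-3,
            'weight_decay': 0.0,
            'optimizer': 'adabound',
        },
    },
}
model = torch.nn.Linear(10, 2)                      # stand-in model
optimizer = make_optimizer(config, model)           # -> adabound.AdaBound instance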

Example 6: get_parser

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def get_parser():
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--model', default='resnet', type=str, help='model',
                        choices=['resnet', 'densenet'])
    parser.add_argument('--optim', default='sgd', type=str, help='optimizer',
                        choices=['sgd', 'adagrad', 'adam', 'amsgrad', 'adabound', 'amsbound'])
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--final_lr', default=0.1, type=float,
                        help='final learning rate of AdaBound')
    parser.add_argument('--gamma', default=1e-3, type=float,
                        help='convergence speed term of AdaBound')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum term')
    parser.add_argument('--beta1', default=0.9, type=float, help='Adam coefficients beta_1')
    parser.add_argument('--beta2', default=0.999, type=float, help='Adam coefficients beta_2')
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('--weight_decay', default=5e-4, type=float,
                        help='weight decay for optimizers')
    return parser 
Developer: Luolc, Project: AdaBound, Lines of code: 20, Source: main.py

Example 7: create_optimizer

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_optimizer(args, model_params):
    if args.optim == 'sgd':
        return optim.SGD(model_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optim == 'adagrad':
        return optim.Adagrad(model_params, args.lr, weight_decay=args.weight_decay)
    elif args.optim == 'adam':
        return optim.Adam(model_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optim == 'amsgrad':
        return optim.Adam(model_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optim == 'adabound':
        return AdaBound(model_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optim == 'amsbound'
        return AdaBound(model_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay, amsbound=True) 
Developer: Luolc, Project: AdaBound, Lines of code: 23, Source: main.py
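A hedged sketch wiring Example 6 and Example 7 together, as they are used in the AdaBound demo's main.py; the stand-in model replaces the project's CIFAR-10 network, and the module defining create_optimizer is assumed to import AdaBound at the top level:

# Illustrative only: parse optimizer settings from the command line, then build the optimizer.
import torch

parser = get_parser()
args = parser.parse_args(['--optim', 'adabound', '--lr', '0.001', '--final_lr', '0.1'])
model = torch.nn.Linear(3 * 32 * 32, 10)            # stand-in for the project's CIFAR-10 model
optimizer = create_optimizer(args, model.parameters())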

Example 8: test_same

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b)
        model_path = os.path.join(tempfile.gettempdir(), 'keras_adabound.h5')
        keras_linear.save(model_path)
        keras_linear = keras.models.load_model(model_path, custom_objects={'AdaBound': AdaBound})
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(torch_linear.parameters(), lr=1e-3, final_lr=0.1, eps=K.epsilon())
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines of code: 37, Source: test_similar.py
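The tests in Examples 8-10 compare the official PyTorch AdaBound (imported as OfficialAdaBound) against the Keras port, but they rely on helper methods (gen_random_weights, gen_torch_linear, gen_keras_linear) that are not part of this excerpt. The following is a hypothetical reconstruction so the tests are easier to follow; in particular, the keras_adabound constructor keyword names are assumptions mirroring the PyTorch arguments, not confirmed API:

# Hypothetical reconstruction of the helpers the tests assume; not the original code.
import numpy as np
import torch
import keras
from keras_adabound import AdaBound   # the Keras port under test (assumed import path)

def gen_random_weights(in_dim=3, out_dim=5):
    # random weight matrix and bias shared by the torch and keras models
    w = np.random.standard_normal((in_dim, out_dim))
    b = np.random.standard_normal((out_dim,))
    return w, b

def gen_torch_linear(w, b):
    # torch.nn.Linear stores weights as (out, in), hence the transpose
    linear = torch.nn.Linear(w.shape[0], w.shape[1])
    with torch.no_grad():
        linear.weight.copy_(torch.tensor(w.T, dtype=torch.float32))
        linear.bias.copy_(torch.tensor(b, dtype=torch.float32))
    return linear

def gen_keras_linear(w, b, amsgrad=False, weight_decay=0.0):
    model = keras.models.Sequential([
        keras.layers.Dense(w.shape[1], input_shape=(w.shape[0],), weights=[w, b]),
    ])
    # keyword names (final_lr, weight_decay, amsgrad) are assumed to mirror PyTorch AdaBound
    model.compile(optimizer=AdaBound(lr=1e-3, final_lr=0.1,
                                     weight_decay=weight_decay, amsgrad=amsgrad),
                  loss='mse')
    return model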

Example 9: test_same_amsgrad

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same_amsgrad(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b, amsgrad=True)
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(
            torch_linear.parameters(),
            lr=1e-3,
            final_lr=0.1,
            eps=K.epsilon(),
            amsbound=True,
        )
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines of code: 40, Source: test_similar.py

Example 10: test_same_weight_decay

# Required module: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same_weight_decay(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b, weight_decay=0.1)
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(
            torch_linear.parameters(),
            lr=1e-3,
            final_lr=0.1,
            eps=K.epsilon(),
            weight_decay=0.1,
        )
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines of code: 40, Source: test_similar.py


Note: the adabound.AdaBound examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.