

Python adabound.AdaBound Code Examples

This article collects typical code examples of adabound.AdaBound in Python. If you are wondering what adabound.AdaBound is, how to call it, or what its use looks like in real projects, the hand-picked examples below should help. You can also study the adabound package itself, where the AdaBound optimizer class is defined, for further usage examples.


The following shows 10 code examples of adabound.AdaBound, drawn from open-source projects and sorted by popularity by default.
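Before the project-specific examples, here is a minimal usage sketch of AdaBound as a drop-in replacement for a standard PyTorch optimizer. The toy model, data, and hyperparameter values are illustrative assumptions and do not come from any of the projects below.

# A minimal sketch, assuming PyTorch and the official adabound package are installed.
import torch
import torch.nn as nn
from adabound import AdaBound

model = nn.Linear(10, 2)                     # toy model, purely illustrative
optimizer = AdaBound(model.parameters(),
                     lr=1e-3,                # initial (Adam-like) learning rate
                     final_lr=0.1,           # SGD-like learning rate AdaBound converges to
                     gamma=1e-3,             # convergence speed of the bound functions
                     weight_decay=5e-4)

criterion = nn.MSELoss()
x, y = torch.randn(4, 10), torch.randn(4, 2)
optimizer.zero_grad()
loss = criterion(model(x), y)
loss.backward()
optimizer.step()                             # AdaBound steps like any other torch optimizer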

Example 1: create_optimizer

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_optimizer(args, optim_params):
    if args.optimizer == 'sgd':
        return optim.SGD(optim_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optimizer == 'adagrad':
        return optim.Adagrad(optim_params, args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optimizer == 'amsgrad':
        return optim.Adam(optim_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optimizer == 'adabound':
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optimizer == 'amsbound'
        from adabound import AdaBound
        return AdaBound(optim_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma, 
                        weight_decay=args.weight_decay, amsbound=True) 
Developer: miraiaroha, Project: ACAN, Lines: 25, Source file: optimizer.py

Example 2: get_optimizer

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def get_optimizer(lr):
    if args.optim == 'adam':
        args.clearmomentum = True
        return torch.optim.Adam(model.parameters(), lr=lr)
    elif args.optim == 'sgd':
        args.clearmomentum = True
        return torch.optim.SGD(model.parameters(), lr=lr)
    elif args.optim == 'sgdm':
        args.clearmomentum = False
        return torch.optim.SGD(model.parameters(), lr=lr, momentum=0.85)
    elif args.optim == 'adabound':
        import adabound
        args.clearmomentum = False
        return adabound.AdaBound(model.parameters(), lr=lr)
    return None

########################################################################################################################

# Init 
Developer: joansj, Project: blow, Lines: 21, Source file: train.py

Example 3: adjust_learning_rate

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def adjust_learning_rate(lr_scheduler: Union[optim.lr_scheduler.StepLR, optim.lr_scheduler.ReduceLROnPlateau],
                         epoch: int, train_loss: float, dev_f1: float) -> bool:
    if isinstance(lr_scheduler, optim.lr_scheduler.StepLR):
        if isinstance(lr_scheduler.optimizer, AdaBound):
            lr_scheduler.step(epoch=epoch)
            return epoch < 200
        else:
            raise ValueError
    elif isinstance(lr_scheduler, optim.lr_scheduler.ReduceLROnPlateau):
        if isinstance(lr_scheduler.optimizer, optim.SGD):
            lr_scheduler.step(train_loss)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 0.0001
        elif isinstance(lr_scheduler.optimizer, optim.Adam):
            lr_scheduler.step(dev_f1)
            return lr_scheduler.optimizer.param_groups[0]['lr'] >= 0.00001
        else:
            raise ValueError
    else:
        raise ValueError 
Developer: yahshibu, Project: nested-ner-tacl2020-transformers, Lines: 21, Source file: utils.py

Example 4: create_opt

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_opt(parameters: Iterator, opt: Optimizer, lr: float = None, l2: float = None, lr_patience: int = None):
    if opt == Optimizer.AdaBound:
        optimizer = AdaBound(parameters, lr=lr if lr is not None else 0.001,
                             weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 150, gamma=0.1)
    elif opt == Optimizer.SGD:
        optimizer = optim.SGD(parameters, lr=lr if lr is not None else 0.1,
                              weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5,
                                                            patience=lr_patience if lr_patience is not None else 5)
    elif opt == Optimizer.Adam:
        optimizer = optim.Adam(parameters, lr=lr if lr is not None else 0.001,
                               weight_decay=l2 if l2 is not None else 0.)
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.1,
                                                            patience=lr_patience if lr_patience is not None else 3)
    else:
        raise ValueError
    return optimizer, lr_scheduler 
Developer: yahshibu, Project: nested-ner-tacl2020-transformers, Lines: 20, Source file: utils.py
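Examples 3 and 4 come from the same utils.py, so a short sketch of how they compose may help. The Optimizer enum, create_opt, and adjust_learning_rate are from that file; the toy model and the dummy per-epoch loss/F1 values below are assumptions for illustration only.

# A composition sketch for Examples 3 and 4 (toy model and metrics are placeholders).
import torch.nn as nn

model = nn.Linear(16, 4)                          # illustrative stand-in for the real NER model
optimizer, lr_scheduler = create_opt(model.parameters(), Optimizer.AdaBound, lr=1e-3, l2=0.0)
for epoch in range(200):
    train_loss, dev_f1 = 0.5, 0.8                 # stand-ins for metrics computed each epoch
    if not adjust_learning_rate(lr_scheduler, epoch, train_loss, dev_f1):
        break                                     # stop once the schedule says training is done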

Example 5: make_optimizer

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def make_optimizer(config, model):
    mode = config['mode']
    config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
    lr = config['learning_rate']
    weight_decay = config['weight_decay']
    opt = {
        'sgd': optim.SGD,
        'adadelta': optim.Adadelta,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'adagrad': optim.Adagrad,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'adabound': adabound.AdaBound
    }
    if 'momentum' in config:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
    else:
        optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
    return optimizer 
Developer: siat-nlp, Project: MAMS-for-ABSA, Lines: 22, Source file: make_optimizer.py
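Example 5 expects a nested configuration dictionary. The sketch below shows one plausible shape of that dictionary: only the keys actually read by make_optimizer ('mode', 'type', 'optimizer', 'learning_rate', 'weight_decay', 'momentum') are taken from the code above; the mode and model-type names are assumptions for illustration.

# A hypothetical config illustrating the nesting make_optimizer expects.
import torch.nn as nn

config = {
    'mode': 'category',                           # selects the 'aspect_category_model' section
    'aspect_category_model': {
        'type': 'recurrent_capsnet',              # illustrative model-type key
        'recurrent_capsnet': {
            'optimizer': 'adabound',              # looked up in the opt table above
            'learning_rate': 1e-3,
            'weight_decay': 0.0,
            # 'momentum': 0.9,                    # only for optimizers that accept momentum
        },
    },
}
model = nn.Linear(8, 3)                           # toy model; make_optimizer only needs .parameters()
optimizer = make_optimizer(config, model)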

Example 6: get_parser

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def get_parser():
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('--model', default='resnet', type=str, help='model',
                        choices=['resnet', 'densenet'])
    parser.add_argument('--optim', default='sgd', type=str, help='optimizer',
                        choices=['sgd', 'adagrad', 'adam', 'amsgrad', 'adabound', 'amsbound'])
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--final_lr', default=0.1, type=float,
                        help='final learning rate of AdaBound')
    parser.add_argument('--gamma', default=1e-3, type=float,
                        help='convergence speed term of AdaBound')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum term')
    parser.add_argument('--beta1', default=0.9, type=float, help='Adam coefficients beta_1')
    parser.add_argument('--beta2', default=0.999, type=float, help='Adam coefficients beta_2')
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('--weight_decay', default=5e-4, type=float,
                        help='weight decay for optimizers')
    return parser 
Developer: Luolc, Project: AdaBound, Lines: 20, Source file: main.py

Example 7: create_optimizer

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def create_optimizer(args, model_params):
    if args.optim == 'sgd':
        return optim.SGD(model_params, args.lr, momentum=args.momentum,
                         weight_decay=args.weight_decay)
    elif args.optim == 'adagrad':
        return optim.Adagrad(model_params, args.lr, weight_decay=args.weight_decay)
    elif args.optim == 'adam':
        return optim.Adam(model_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay)
    elif args.optim == 'amsgrad':
        return optim.Adam(model_params, args.lr, betas=(args.beta1, args.beta2),
                          weight_decay=args.weight_decay, amsgrad=True)
    elif args.optim == 'adabound':
        return AdaBound(model_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay)
    else:
        assert args.optim == 'amsbound'
        return AdaBound(model_params, args.lr, betas=(args.beta1, args.beta2),
                        final_lr=args.final_lr, gamma=args.gamma,
                        weight_decay=args.weight_decay, amsbound=True) 
Developer: Luolc, Project: AdaBound, Lines: 23, Source file: main.py
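Examples 6 and 7 come from the same main.py in the official AdaBound repository, so they are meant to be used together. The sketch below wires them up; the explicit argument list and the toy model (standing in for the ResNet/DenseNet the script actually builds) are assumptions for illustration.

# A sketch tying Examples 6 and 7 together.
import torch.nn as nn

parser = get_parser()
args = parser.parse_args(['--optim', 'adabound', '--lr', '1e-3', '--final_lr', '0.1'])
model = nn.Linear(3, 10)                          # placeholder for the CIFAR-10 network
optimizer = create_optimizer(args, model.parameters())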

Example 8: test_same

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b)
        model_path = os.path.join(tempfile.gettempdir(), 'keras_adabound.h5')
        keras_linear.save(model_path)
        keras_linear = keras.models.load_model(model_path, custom_objects={'AdaBound': AdaBound})
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(torch_linear.parameters(), lr=1e-3, final_lr=0.1, eps=K.epsilon())
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines: 37, Source file: test_similar.py

Example 9: test_same_amsgrad

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same_amsgrad(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b, amsgrad=True)
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(
            torch_linear.parameters(),
            lr=1e-3,
            final_lr=0.1,
            eps=K.epsilon(),
            amsbound=True,
        )
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines: 40, Source file: test_similar.py

Example 10: test_same_weight_decay

# Required module import: import adabound [as alias]
# Or: from adabound import AdaBound [as alias]
def test_same_weight_decay(self):
        if sys.version_info[0] < 3:
            return
        self.reset_seed(0xcafe)
        w, b = self.gen_random_weights()
        torch_linear = self.gen_torch_linear(w, b)
        keras_linear = self.gen_keras_linear(w, b, weight_decay=0.1)
        w, b = self.gen_random_weights()
        criterion = torch.nn.MSELoss()
        optimizer = OfficialAdaBound(
            torch_linear.parameters(),
            lr=1e-3,
            final_lr=0.1,
            eps=K.epsilon(),
            weight_decay=0.1,
        )
        for i in range(300):
            x = np.random.standard_normal((1, 3))
            y = np.dot(x, w) + b
            optimizer.zero_grad()
            y_hat = torch_linear(torch.Tensor(x.tolist()))
            loss = criterion(y_hat, torch.Tensor(y.tolist()))
            torch_loss = loss.tolist()
            loss.backward()
            optimizer.step()
            keras_loss = keras_linear.train_on_batch(x, y).tolist()
            # print(i, torch_loss, keras_loss)
        self.assertTrue(abs(torch_loss - keras_loss) < 1e-2)
        self.assertTrue(np.allclose(
            torch_linear.weight.detach().numpy().transpose(),
            keras_linear.get_weights()[0],
            atol=1e-2,
        ))
        self.assertTrue(np.allclose(
            torch_linear.bias.detach().numpy(),
            keras_linear.get_weights()[1],
            atol=1e-2,
        )) 
Developer: CyberZHG, Project: keras-adabound, Lines: 40, Source file: test_similar.py


Note: The adabound.AdaBound examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with them; please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.