

Python optimization.warmup_linear Method Code Examples

This article collects typical usage examples of the Python method pytorch_pretrained_bert.optimization.warmup_linear. If you are wondering how to use optimization.warmup_linear in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, pytorch_pretrained_bert.optimization.


The following presents 5 code examples of the optimization.warmup_linear method, sorted by popularity by default.
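For context, warmup_linear maps training progress (a fraction in [0, 1]) to a learning-rate multiplier: it ramps up linearly during the warm-up phase and then decays linearly toward zero. Below is a minimal sketch of how the pytorch_pretrained_bert example scripts apply it in a training loop; the model and hyperparameter values are illustrative, not taken from the projects featured in this article:

import torch
from pytorch_pretrained_bert.optimization import warmup_linear

base_lr = 5e-5           # peak learning rate (illustrative)
num_train_steps = 1000   # total optimization steps (illustrative)
warmup_proportion = 0.1  # fraction of training spent warming up

model = torch.nn.Linear(10, 2)  # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=base_lr)

for global_step in range(num_train_steps):
    # Scale the peak learning rate by the schedule value at the
    # current fraction of training progress.
    lr_this_step = base_lr * warmup_linear(
        global_step / num_train_steps, warmup_proportion
    )
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr_this_step
    # ... forward/backward pass and optimizer.step() would go here ...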

Example 1: __init__

# Required import: from pytorch_pretrained_bert import optimization [as alias]
# Or: from pytorch_pretrained_bert.optimization import warmup_linear [as alias]
def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), eps=1e-6, weight_decay_rate=0.01,
                 max_grad_norm=1.0):
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        betas=betas, eps=eps, weight_decay_rate=weight_decay_rate,
                        max_grad_norm=max_grad_norm)
        super(Adamax, self).__init__(params, defaults) 
Developer: RTIInternational, Project: gobbli, Lines: 19, Source: bert_optim.py
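A minimal sketch of constructing the Adamax optimizer defined above, assuming its step() method resolves the schedule name as in the companion schedule_func of Example 3; the model and hyperparameter values are illustrative:

import torch

model = torch.nn.Linear(768, 2)  # stand-in for a small classification head
optimizer = Adamax(
    model.parameters(),
    lr=5e-5,                   # peak learning rate
    warmup=0.1,                # warm up over the first 10% of steps
    t_total=1000,              # total number of optimization steps
    schedule="warmup_linear",  # schedule selecting warmup_linear
)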

Example 2: __init__

# Required import: from pytorch_pretrained_bert import optimization [as alias]
# Or: from pytorch_pretrained_bert.optimization import warmup_linear [as alias]
def __init__(self, params, lr, warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(Adamax, self).__init__(params, defaults) 
Developer: namisan, Project: mt-dnn, Lines: 19, Source: bert_optim.py

Example 3: schedule_func

# Required import: from pytorch_pretrained_bert import optimization [as alias]
# Or: from pytorch_pretrained_bert.optimization import warmup_linear [as alias]
def schedule_func(sch):
    # Resolve a schedule name (e.g. "warmup_linear") to the function of
    # the same name; fall back to warmup_linear if the lookup fails.
    try:
        f = eval(sch)
    except Exception:
        f = warmup_linear
    return f
Developer: RTIInternational, Project: gobbli, Lines: 8, Source: bert_optim.py
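A quick illustration of the lookup, assuming warmup_linear is defined in the enclosing scope (as the import comments above suggest):

f = schedule_func("warmup_linear")     # eval() resolves the name
g = schedule_func("no_such_schedule")  # NameError -> falls back to warmup_linear
assert f is warmup_linear and g is warmup_linear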

Example 4: warmup_linear

# Required import: from pytorch_pretrained_bert import optimization [as alias]
# Or: from pytorch_pretrained_bert.optimization import warmup_linear [as alias]
def warmup_linear(x, warmup=0.002):
    # x is the fraction of training completed, in [0, 1]
    if x < warmup:
        return x / warmup  # linear ramp from 0 to 1 during warm-up
    return 1.0 - x         # linear decay from 1 to 0 afterwards
Developer: Louis-udm, Project: NER-BERT-CRF, Lines: 6, Source: NER_BERT_CRF.py
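A few sample values showing the shape of this schedule, using a warm-up fraction of 0.1 chosen for illustration:

for x in (0.0, 0.05, 0.1, 0.5, 1.0):
    print(x, warmup_linear(x, warmup=0.1))
# 0.0  -> 0.0   start of warm-up
# 0.05 -> 0.5   halfway through warm-up
# 0.1  -> 0.9   decay phase begins (1.0 - x)
# 0.5  -> 0.5
# 1.0  -> 0.0   end of training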

Example 5: __init__

# Required import: from pytorch_pretrained_bert import optimization [as alias]
# Or: from pytorch_pretrained_bert.optimization import warmup_linear [as alias]
def __init__(
        self,
        params,
        lr,
        warmup=-1,
        t_total=-1,
        schedule="warmup_linear",
        betas=(0.9, 0.999),
        eps=1e-6,
        weight_decay=0.01,
        max_grad_norm=1.0,
    ):
        if not lr >= 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(
            lr=lr,
            schedule=schedule,
            warmup=warmup,
            t_total=t_total,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            max_grad_norm=max_grad_norm,
        )
        super(Adamax, self).__init__(params, defaults) 
Developer: microsoft, Project: MT-DNN, Lines: 35, Source: bert_optim.py


Note: The pytorch_pretrained_bert.optimization.warmup_linear method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.