

Python modeling.BertLayerNorm Method Code Examples

This article collects typical usage examples of pytorch_pretrained_bert.modeling.BertLayerNorm, the layer-normalization module used throughout the BERT implementation. If you are unsure what modeling.BertLayerNorm does or how to use it, the curated code examples below should help. You can also explore further usage examples from the pytorch_pretrained_bert.modeling module.


The following presents 9 code examples of modeling.BertLayerNorm, sorted by popularity by default.
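Before turning to the project examples, here is a minimal, self-contained sketch of BertLayerNorm used in isolation. This is an illustrative snippet, not taken from any project below; it assumes pytorch_pretrained_bert is installed, and the tensor shapes are arbitrary:

import torch
from pytorch_pretrained_bert.modeling import BertLayerNorm

# BertLayerNorm normalizes over the last dimension of its input, then applies
# a learned elementwise scale (weight/gamma) and shift (bias/beta).
hidden_size = 768                       # BERT-base hidden size
layer_norm = BertLayerNorm(hidden_size, eps=1e-12)

x = torch.randn(2, 16, hidden_size)     # (batch, seq_len, hidden), illustrative
y = layer_norm(x)                       # output shape matches the input
assert y.shape == x.shape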

Example 1: _my_init

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range * self.opt['init_ratio'])
            elif isinstance(module, BertLayerNorm):
                # Slightly different from the BERT PyTorch version, which appears to be a bug.
                # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
                # Layer normalization (https://arxiv.org/abs/1607.06450)
                # support both old/latest version
                if 'beta' in dir(module) and 'gamma' in dir(module):
                    module.beta.data.zero_()
                    module.gamma.data.fill_(1.0)
                else:
                    module.bias.data.zero_()
                    module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()
        self.apply(init_weights) 
Author: RTIInternational, Project: gobbli, Lines: 22, Source file: matcher.py
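The dir() check above keeps the initializer compatible with both generations of the library: older releases of pytorch_pretrained_bert expose the LayerNorm parameters as gamma (scale) and beta (shift), while newer releases rename them to weight and bias.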

Example 2: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
        super().__init__()

        assert isinstance(rnn_type, str)
        rnn_type = rnn_type.upper()
        assert rnn_type == "LSTM" or rnn_type == "GRU"
        rnn_cls = getattr(nn, rnn_type)
        self._rnn = rnn_cls(
            num_hid,
            num_hid,
            1,
            bidirectional=bidirect,
            dropout=dropout,
            batch_first=True,
        )
        self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
        self.rnn_type = rnn_type
        self.num_hid = num_hid
        self.ndirections = 1 + int(bidirect) 
Author: microsoft, Project: MT-DNN, Lines: 21, Source file: san_model.py
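This __init__ belongs to an nn.Module subclass in san_model.py that combines an LSTM/GRU with a BertLayerNorm. Assuming a hypothetical class name RNNEncoder for illustration, it would be constructed like:

encoder = RNNEncoder(num_hid=768, bidirect=False, dropout=0.3, rnn_type='GRU')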

Example 3: _my_init

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_ratio)
            elif isinstance(module, BertLayerNorm):
                # Slightly different from the BERT PyTorch version, which appears to be a bug.
                # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
                # Layer normalization (https://arxiv.org/abs/1607.06450)
                # support both old/latest version
                if "beta" in dir(module) and "gamma" in dir(module):
                    module.beta.data.zero_()
                    module.gamma.data.fill_(1.0)
                else:
                    module.bias.data.zero_()
                    module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()

        self.apply(init_weights) 
Author: microsoft, Project: MT-DNN, Lines: 23, Source file: san.py

Example 4: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob) 
Author: sattree, Project: gap, Lines: 7, Source file: evidence_pooling.py
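The dense + LayerNorm + dropout triple built here mirrors the output sublayers of the upstream BERT implementation (e.g. BertSelfOutput), where a linear projection is followed by dropout and layer normalization.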

Example 5: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module, initializer_range):
    """ Initialize the weights.
    """
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
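        # Note: unlike the upstream BERT init, which fills gamma with 1.0 and
        # zeroes beta, this variant draws both LayerNorm parameters from the
        # same normal distribution as the linear and embedding weights.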
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_() 
Author: easonnie, Project: semanticRetrievalMRS, Lines: 14, Source file: bert_v0_1.py

Example 6: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module):
    """ Initialize the weights.
    """
    initializer_range = 0.02
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_() 
Author: easonnie, Project: semanticRetrievalMRS, Lines: 15, Source file: bert_multilayer_output.py
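This is identical to Example 5 except that initializer_range is hard-coded to 0.02 instead of passed in as a parameter.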

Example 7: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 entity_linker: Model,
                 span_attention_config: Dict[str, int],
                 should_init_kg_to_bert_inverse: bool = True,
                 freeze: bool = False,
                 regularizer: RegularizerApplicator = None):
        super().__init__(vocab, regularizer)

        self.entity_linker = entity_linker
        self.entity_embedding_dim = self.entity_linker.disambiguator.entity_embedding_dim
        self.contextual_embedding_dim = self.entity_linker.disambiguator.contextual_embedding_dim

        self.weighted_entity_layer_norm = BertLayerNorm(self.entity_embedding_dim, eps=1e-5)
        init_bert_weights(self.weighted_entity_layer_norm, 0.02)

        self.dropout = torch.nn.Dropout(0.1)

        # the span attention layers
        assert len(span_attention_config) == 4
        config = BertConfig(
            0, # vocab size, not used
            hidden_size=span_attention_config['hidden_size'],
            num_hidden_layers=span_attention_config['num_hidden_layers'],
            num_attention_heads=span_attention_config['num_attention_heads'],
            intermediate_size=span_attention_config['intermediate_size']
        )
        self.span_attention_layer = SpanAttentionLayer(config)
        # already init inside span attention layer

        # for the output!
        self.output_layer_norm = BertLayerNorm(self.contextual_embedding_dim, eps=1e-5)

        self.kg_to_bert_projection = torch.nn.Linear(
                self.entity_embedding_dim, self.contextual_embedding_dim
        )

        self.should_init_kg_to_bert_inverse = should_init_kg_to_bert_inverse
        self._init_kg_to_bert_projection()

        self._freeze_all = freeze 
Author: allenai, Project: kb, Lines: 43, Source file: knowbert.py
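Note the eps=1e-5 passed to both BertLayerNorm instances here; the other constructors on this page (Examples 2, 4, and 9) use the BERT default of eps=1e-12.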

Example 8: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)): 
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_() 
Author: Louis-udm, Project: NER-BERT-CRF, Lines: 14, Source file: NER_BERT_CRF.py
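Unlike Examples 1 and 3, this initializer assumes the newer parameter names only: it zeroes the LayerNorm bias and fills its weight with 1.0, and would raise an AttributeError on old releases that still expose gamma/beta.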

Example 9: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
        super().__init__()

        assert isinstance(rnn_type, str)
        rnn_type = rnn_type.upper()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = getattr(nn, rnn_type)
        self._rnn = rnn_cls(num_hid, num_hid, 1,
                bidirectional=bidirect,
                dropout=dropout,
                batch_first=True)
        self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
        self.rnn_type = rnn_type
        self.num_hid = num_hid
        self.ndirections = 1 + int(bidirect) 
Author: namisan, Project: mt-dnn, Lines: 17, Source file: san_model.py
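This is the same module as in Example 2, differing only in quote style and call formatting.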


Note: The pytorch_pretrained_bert.modeling.BertLayerNorm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use of the code should follow the corresponding project's license. Do not reproduce without permission.