

Python modeling.BertLayerNorm Method Code Examples

This article collects typical usage examples of the Python method pytorch_pretrained_bert.modeling.BertLayerNorm. If you have been wondering what modeling.BertLayerNorm does and how to use it in practice, the curated code examples below should help. You can also explore further usage examples from the containing module, pytorch_pretrained_bert.modeling.


Nine code examples of the modeling.BertLayerNorm method are shown below, sorted by popularity by default.
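Before the individual examples, here is a minimal standalone sketch of what BertLayerNorm itself does (hidden_size=768 is just an illustrative BERT-base value; functionally the class matches torch.nn.LayerNorm over the last dimension):

import torch
from pytorch_pretrained_bert.modeling import BertLayerNorm

hidden_size = 768                      # illustrative BERT-base hidden size
layer_norm = BertLayerNorm(hidden_size, eps=1e-12)

x = torch.randn(2, 16, hidden_size)    # (batch, seq_len, hidden)
out = layer_norm(x)                    # normalizes over the last dimension
print(out.shape)                       # torch.Size([2, 16, 768])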

Example 1: _my_init

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range * self.opt['init_ratio'])
            elif isinstance(module, BertLayerNorm):
                # Slightly different from the BERT PyTorch version, which is likely a bug.
                # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
                # Layer normalization (https://arxiv.org/abs/1607.06450)
                # supports both the old and the new parameter names
                if 'beta' in dir(module) and 'gamma' in dir(module):
                    module.beta.data.zero_()
                    module.gamma.data.fill_(1.0)
                else:
                    module.bias.data.zero_()
                    module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()
        self.apply(init_weights) 
Developer: RTIInternational, Project: gobbli, Lines of code: 22, Source file: matcher.py
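For reference, layer normalization (the operation whose parameters are reset above) computes

    \mathrm{LN}(x) = \gamma \odot \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta

where \mu and \sigma^{2} are the mean and variance taken over the hidden dimension. Zeroing beta/bias and filling gamma/weight with 1.0 therefore resets the layer to an identity affine transform; the two branches above differ only in the parameter names used by older (beta/gamma) and newer (bias/weight) releases of the library.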

Example 2: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
        super().__init__()

        assert isinstance(rnn_type, str)
        rnn_type = rnn_type.upper()
        assert rnn_type == "LSTM" or rnn_type == "GRU"
        rnn_cls = getattr(nn, rnn_type)
        self._rnn = rnn_cls(
            num_hid,
            num_hid,
            1,
            bidirectional=bidirect,
            dropout=dropout,
            batch_first=True,
        )
        self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
        self.rnn_type = rnn_type
        self.num_hid = num_hid
        self.ndirections = 1 + int(bidirect) 
Developer: microsoft, Project: MT-DNN, Lines of code: 21, Source file: san_model.py
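The excerpt shows only the constructor; the name of the enclosing encoder class is not included in the snippet. As an illustrative sketch, the same wiring can be exercised standalone:

import torch
import torch.nn as nn
from pytorch_pretrained_bert.modeling import BertLayerNorm

num_hid = 512
rnn = nn.GRU(num_hid, num_hid, 1, bidirectional=False, dropout=0.0, batch_first=True)
layer_norm = BertLayerNorm(num_hid, eps=1e-12)

x = torch.randn(4, 20, num_hid)    # (batch, seq_len, hidden)
out, _ = rnn(x)                    # nn.GRU returns (output, final hidden state)
out = layer_norm(out)              # normalize each timestep's features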

Example 3: _my_init

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
        def init_weights(module):
            if isinstance(module, (nn.Linear, nn.Embedding)):
                # Slightly different from the TF version which uses truncated_normal for initialization
                # cf https://github.com/pytorch/pytorch/pull/5617
                module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_ratio)
            elif isinstance(module, BertLayerNorm):
                # Slightly different from the BERT PyTorch version, which is likely a bug.
                # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
                # Layer normalization (https://arxiv.org/abs/1607.06450)
                # supports both the old and the new parameter names
                if "beta" in dir(module) and "gamma" in dir(module):
                    module.beta.data.zero_()
                    module.gamma.data.fill_(1.0)
                else:
                    module.bias.data.zero_()
                    module.weight.data.fill_(1.0)
            if isinstance(module, nn.Linear):
                module.bias.data.zero_()

        self.apply(init_weights) 
Developer: microsoft, Project: MT-DNN, Lines of code: 23, Source file: san.py

Example 4: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob) 
Developer: sattree, Project: gap, Lines of code: 7, Source file: evidence_pooling.py
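Only the constructor is excerpted here; in BERT-style output blocks these three layers are typically combined with a residual connection in forward, along these lines (a sketch of the usual pattern, not necessarily this project's exact code):

def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        # residual connection, then layer normalization
        return self.LayerNorm(hidden_states + input_tensor)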

Example 5: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module, initializer_range):
    """ Initialize the weights.
    """
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_() 
Developer: easonnie, Project: semanticRetrievalMRS, Lines of code: 14, Source file: bert_v0_1.py
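Because this variant takes initializer_range as a second argument, it is usually applied through Module.apply with a lambda. An illustrative invocation (the Sequential model is a stand-in; note that the beta/gamma access above assumes an older BertLayerNorm release):

import torch.nn as nn

model = nn.Sequential(nn.Linear(768, 768), nn.Linear(768, 2))  # stand-in model
model.apply(lambda m: init_bert_weights(m, 0.02))              # 0.02 is BERT's usual range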

Example 6: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module):
    """ Initialize the weights.
    """
    initializer_range = 0.02
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_() 
Developer: easonnie, Project: semanticRetrievalMRS, Lines of code: 15, Source file: bert_multilayer_output.py

Example 7: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self,
                 vocab: Vocabulary,
                 entity_linker: Model,
                 span_attention_config: Dict[str, int],
                 should_init_kg_to_bert_inverse: bool = True,
                 freeze: bool = False,
                 regularizer: RegularizerApplicator = None):
        super().__init__(vocab, regularizer)

        self.entity_linker = entity_linker
        self.entity_embedding_dim = self.entity_linker.disambiguator.entity_embedding_dim
        self.contextual_embedding_dim = self.entity_linker.disambiguator.contextual_embedding_dim

        self.weighted_entity_layer_norm = BertLayerNorm(self.entity_embedding_dim, eps=1e-5)
        init_bert_weights(self.weighted_entity_layer_norm, 0.02)

        self.dropout = torch.nn.Dropout(0.1)

        # the span attention layers
        assert len(span_attention_config) == 4
        config = BertConfig(
            0, # vocab size, not used
            hidden_size=span_attention_config['hidden_size'],
            num_hidden_layers=span_attention_config['num_hidden_layers'],
            num_attention_heads=span_attention_config['num_attention_heads'],
            intermediate_size=span_attention_config['intermediate_size']
        )
        self.span_attention_layer = SpanAttentionLayer(config)
        # already init inside span attention layer

        # for the output!
        self.output_layer_norm = BertLayerNorm(self.contextual_embedding_dim, eps=1e-5)

        self.kg_to_bert_projection = torch.nn.Linear(
                self.entity_embedding_dim, self.contextual_embedding_dim
        )

        self.should_init_kg_to_bert_inverse = should_init_kg_to_bert_inverse
        self._init_kg_to_bert_projection()

        self._freeze_all = freeze 
Developer: allenai, Project: kb, Lines of code: 43, Source file: knowbert.py

Example 8: init_bert_weights

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)): 
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_() 
Developer: Louis-udm, Project: NER-BERT-CRF, Lines of code: 14, Source file: NER_BERT_CRF.py

Example 9: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
        super().__init__()

        assert isinstance(rnn_type, str)
        rnn_type = rnn_type.upper()
        assert rnn_type == 'LSTM' or rnn_type == 'GRU'
        rnn_cls = getattr(nn, rnn_type)
        self._rnn = rnn_cls(num_hid, num_hid, 1,
                bidirectional=bidirect,
                dropout=dropout,
                batch_first=True)
        self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
        self.rnn_type = rnn_type
        self.num_hid = num_hid
        self.ndirections = 1 + int(bidirect) 
Developer: namisan, Project: mt-dnn, Lines of code: 17, Source file: san_model.py


Note: The pytorch_pretrained_bert.modeling.BertLayerNorm examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not republish without permission.