This article collects typical usage examples of the Python method pytorch_pretrained_bert.modeling.BertLayerNorm. If you are wondering what modeling.BertLayerNorm does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the module pytorch_pretrained_bert.modeling.
The following shows 9 code examples of modeling.BertLayerNorm, sorted by popularity by default.
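Before the project-specific examples, here is a minimal sketch of the typical import and call pattern. The hidden size and tensor shapes are illustrative assumptions, not taken from any of the projects below.

import torch
from pytorch_pretrained_bert.modeling import BertLayerNorm

hidden_size = 768                              # illustrative value (bert-base hidden size)
layer_norm = BertLayerNorm(hidden_size, eps=1e-12)

x = torch.randn(2, 16, hidden_size)            # (batch, sequence length, hidden size)
y = layer_norm(x)                              # normalizes over the last dimension
print(y.shape)                                 # torch.Size([2, 16, 768])

The examples that follow show how real projects construct BertLayerNorm inside their own modules and how they initialize its parameters.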
Example 1: _my_init
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal for initialization
            # cf. https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.bert_config.initializer_range * self.opt['init_ratio'])
        elif isinstance(module, BertLayerNorm):
            # Slightly different from the BERT pytorch version, which should be a bug.
            # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
            # Layer normalization (https://arxiv.org/abs/1607.06450)
            # Support both the old (beta/gamma) and the latest (bias/weight) parameter names
            if 'beta' in dir(module) and 'gamma' in dir(module):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example 2: __init__
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
    super().__init__()
    assert isinstance(rnn_type, str)
    rnn_type = rnn_type.upper()
    assert rnn_type == "LSTM" or rnn_type == "GRU"
    rnn_cls = getattr(nn, rnn_type)
    # Single-layer recurrent encoder whose output is normalized with a BERT-style layer norm
    self._rnn = rnn_cls(
        num_hid,
        num_hid,
        1,
        bidirectional=bidirect,
        dropout=dropout,
        batch_first=True,
    )
    self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
    self.rnn_type = rnn_type
    self.num_hid = num_hid
    self.ndirections = 1 + int(bidirect)
Example 3: _my_init
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def _my_init(self):
    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses truncated_normal for initialization
            # cf. https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_ratio)
        elif isinstance(module, BertLayerNorm):
            # Slightly different from the BERT pytorch version, which should be a bug.
            # Note that it only affects training from scratch. For detailed discussions, please contact xiaodl@.
            # Layer normalization (https://arxiv.org/abs/1607.06450)
            # Support both the old (beta/gamma) and the latest (bias/weight) parameter names
            if "beta" in dir(module) and "gamma" in dir(module):
                module.beta.data.zero_()
                module.gamma.data.fill_(1.0)
            else:
                module.bias.data.zero_()
                module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()

    self.apply(init_weights)
Example 4: __init__
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, config):
    super().__init__()
    # Dense projection followed by a BERT-style layer norm and dropout
    self.dense = nn.Linear(config.hidden_size, config.hidden_size)
    self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
    self.dropout = nn.Dropout(config.hidden_dropout_prob)
Example 5: init_bert_weights
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module, initializer_range):
    """Initialize the weights."""
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version, which uses truncated_normal for initialization
        # cf. https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
Example 6: init_bert_weights
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(module):
    """Initialize the weights."""
    initializer_range = 0.02
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version, which uses truncated_normal for initialization
        # cf. https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.beta.data.normal_(mean=0.0, std=initializer_range)
        module.gamma.data.normal_(mean=0.0, std=initializer_range)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
Example 7: __init__
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self,
             vocab: Vocabulary,
             entity_linker: Model,
             span_attention_config: Dict[str, int],
             should_init_kg_to_bert_inverse: bool = True,
             freeze: bool = False,
             regularizer: RegularizerApplicator = None):
    super().__init__(vocab, regularizer)

    self.entity_linker = entity_linker
    self.entity_embedding_dim = self.entity_linker.disambiguator.entity_embedding_dim
    self.contextual_embedding_dim = self.entity_linker.disambiguator.contextual_embedding_dim

    self.weighted_entity_layer_norm = BertLayerNorm(self.entity_embedding_dim, eps=1e-5)
    init_bert_weights(self.weighted_entity_layer_norm, 0.02)

    self.dropout = torch.nn.Dropout(0.1)

    # the span attention layers
    assert len(span_attention_config) == 4
    config = BertConfig(
        0,  # vocab size, not used
        hidden_size=span_attention_config['hidden_size'],
        num_hidden_layers=span_attention_config['num_hidden_layers'],
        num_attention_heads=span_attention_config['num_attention_heads'],
        intermediate_size=span_attention_config['intermediate_size']
    )
    self.span_attention_layer = SpanAttentionLayer(config)
    # already initialized inside the span attention layer

    # for the output
    self.output_layer_norm = BertLayerNorm(self.contextual_embedding_dim, eps=1e-5)

    self.kg_to_bert_projection = torch.nn.Linear(
        self.entity_embedding_dim, self.contextual_embedding_dim
    )
    self.should_init_kg_to_bert_inverse = should_init_kg_to_bert_inverse
    self._init_kg_to_bert_projection()

    self._freeze_all = freeze
Example 8: init_bert_weights
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def init_bert_weights(self, module):
    """Initialize the weights."""
    if isinstance(module, (nn.Linear, nn.Embedding)):
        # Slightly different from the TF version, which uses truncated_normal for initialization
        # cf. https://github.com/pytorch/pytorch/pull/5617
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
    elif isinstance(module, BertLayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
    if isinstance(module, nn.Linear) and module.bias is not None:
        module.bias.data.zero_()
Example 9: __init__
# Module to import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertLayerNorm [as alias]
def __init__(self, num_hid, bidirect, dropout, rnn_type):
    super().__init__()
    assert isinstance(rnn_type, str)
    rnn_type = rnn_type.upper()
    assert rnn_type == 'LSTM' or rnn_type == 'GRU'
    rnn_cls = getattr(nn, rnn_type)
    self._rnn = rnn_cls(num_hid, num_hid, 1,
                        bidirectional=bidirect,
                        dropout=dropout,
                        batch_first=True)
    self._layer_norm = BertLayerNorm(num_hid, eps=1e-12)
    self.rnn_type = rnn_type
    self.num_hid = num_hid
    self.ndirections = 1 + int(bidirect)