Python modeling.BertConfig Code Examples

This article compiles typical usage examples of pytorch_pretrained_bert.modeling.BertConfig in Python. If you are wondering what modeling.BertConfig is for, how to use it, or what working code looks like, the curated examples below should help. You can also explore other usage examples from the pytorch_pretrained_bert.modeling module.


The following sections present 7 code examples that use modeling.BertConfig, sorted by popularity by default.
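Before the examples, here is a minimal, hedged sketch of the two common ways to build a BertConfig with pytorch_pretrained_bert; the numeric values and the JSON path are illustrative and do not come from any of the projects below.

from pytorch_pretrained_bert.modeling import BertConfig

# Explicit construction: the first positional argument is
# vocab_size_or_config_json_file; the keywords override the library defaults.
config = BertConfig(
    vocab_size_or_config_json_file=30522,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
)

# Alternatively, load every field from a JSON config file (illustrative path).
config = BertConfig.from_json_file("bert_config.json")
print(config.hidden_size)  # 768 for a BERT-base style config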

Example 1: test_span_word_attention

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses json, torch, and SpanWordAttention from the source project (allenai/kb)
def test_span_word_attention(self):
        config_file = 'tests/fixtures/bert/bert_config.json'
        with open(config_file) as fin:
            json_config = json.load(fin)

        vocab_size = json_config.pop("vocab_size")
        config = BertConfig(vocab_size, **json_config)

        span_attn = SpanWordAttention(config)

        batch_size = 7
        timesteps = 29
        hidden_states = torch.rand(batch_size, timesteps, config.hidden_size)

        num_entity_embeddings = 11
        entity_embeddings = torch.rand(batch_size, num_entity_embeddings, config.hidden_size)
        entity_mask = entity_embeddings[:, :, 0] > 0.5

        span_attn, attention_probs = span_attn(hidden_states, entity_embeddings, entity_mask)
        self.assertEqual(list(span_attn.shape), [batch_size, timesteps, config.hidden_size]) 
Developer: allenai, Project: kb, Lines: 22, Source: test_span_attention_layer.py
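A note on the pattern in Example 1: the test pops vocab_size out of the JSON dict because BertConfig's first positional parameter is vocab_size_or_config_json_file, and the remaining keys are passed as keyword overrides. Assuming the fixture file contains all the standard fields, the same config could also be built in one call (an equivalent sketch, not part of the original test):

from pytorch_pretrained_bert.modeling import BertConfig

# Loads every field, including vocab_size, directly from the fixture file.
config = BertConfig.from_json_file('tests/fixtures/bert/bert_config.json')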

Example 2: test_span_attention_layer

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses json, torch, and SpanAttentionLayer from the source project (allenai/kb)
def test_span_attention_layer(self):
        config_file = 'tests/fixtures/bert/bert_config.json'
        with open(config_file) as fin:
            json_config = json.load(fin)

        vocab_size = json_config.pop("vocab_size")
        config = BertConfig(vocab_size, **json_config)
    
        batch_size = 7
        timesteps = 29
        hidden_states = torch.rand(batch_size, timesteps, config.hidden_size)
    
        num_entity_embeddings = 11
        entity_embeddings = torch.rand(batch_size, num_entity_embeddings, config.hidden_size)
        entity_mask = entity_embeddings[:, :, 0] > 0.5
    
        span_attention_layer = SpanAttentionLayer(config)
    
        output = span_attention_layer(hidden_states, entity_embeddings, entity_mask)

        self.assertEqual(list(output["output"].shape), [batch_size, timesteps, config.hidden_size]) 
Developer: allenai, Project: kb, Lines: 23, Source: test_span_attention_layer.py

Example 3: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses torch and BertLayer from pytorch_pretrained_bert.modeling
def __init__(
        self,
        bert_model,
        output_dim,
        add_transformer_layer=False,
        layer_pulled=-1,
        aggregation="first",
    ):
        super(BertWrapper, self).__init__()
        self.layer_pulled = layer_pulled
        self.aggregation = aggregation
        self.add_transformer_layer = add_transformer_layer
        # deduce bert output dim from the size of embeddings
        bert_output_dim = bert_model.embeddings.word_embeddings.weight.size(1)

        if add_transformer_layer:
            config_for_one_layer = BertConfig(
                0,
                hidden_size=bert_output_dim,
                num_attention_heads=int(bert_output_dim / 64),
                intermediate_size=3072,
                hidden_act='gelu',
            )
            self.additional_transformer_layer = BertLayer(config_for_one_layer)
        self.additional_linear_layer = torch.nn.Linear(bert_output_dim, output_dim)
        self.bert_model = bert_model 
Developer: facebookresearch, Project: ParlAI, Lines: 28, Source: helpers.py
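The num_attention_heads=int(bert_output_dim / 64) line in Example 3 keeps the per-head dimensionality at 64, matching standard BERT (768 hidden units split over 12 heads). A quick check with illustrative BERT-base numbers:

bert_output_dim = 768                          # BERT-base embedding size
num_attention_heads = bert_output_dim // 64    # 12 heads
assert bert_output_dim % num_attention_heads == 0   # 64 dims per head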

Example 4: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses torch.nn (as nn) and BertLayer from pytorch_pretrained_bert.modeling
def __init__(self, vocab_size, original_hidden_size, num_layers, tau=1):
        super().__init__()
        self.bert_layer = BertLayer(BertConfig(
            vocab_size_or_config_json_file=vocab_size,
            hidden_size=original_hidden_size * num_layers,
        ))
        self.linear_layer = nn.Linear(original_hidden_size * num_layers, 1)
        self.log_sigmoid = nn.LogSigmoid()
        self.tau = tau 
Developer: zphang, Project: bert_on_stilts, Lines: 11, Source: adv_masker.py

Example 5: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses torch, AllenNLP's Vocabulary/Model/RegularizerApplicator,
# and BertLayerNorm, init_bert_weights, SpanAttentionLayer (see the source file below)
def __init__(self,
                 vocab: Vocabulary,
                 entity_linker: Model,
                 span_attention_config: Dict[str, int],
                 should_init_kg_to_bert_inverse: bool = True,
                 freeze: bool = False,
                 regularizer: RegularizerApplicator = None):
        super().__init__(vocab, regularizer)

        self.entity_linker = entity_linker
        self.entity_embedding_dim = self.entity_linker.disambiguator.entity_embedding_dim
        self.contextual_embedding_dim = self.entity_linker.disambiguator.contextual_embedding_dim

        self.weighted_entity_layer_norm = BertLayerNorm(self.entity_embedding_dim, eps=1e-5)
        init_bert_weights(self.weighted_entity_layer_norm, 0.02)

        self.dropout = torch.nn.Dropout(0.1)

        # the span attention layers
        assert len(span_attention_config) == 4
        config = BertConfig(
            0, # vocab size, not used
            hidden_size=span_attention_config['hidden_size'],
            num_hidden_layers=span_attention_config['num_hidden_layers'],
            num_attention_heads=span_attention_config['num_attention_heads'],
            intermediate_size=span_attention_config['intermediate_size']
        )
        self.span_attention_layer = SpanAttentionLayer(config)
        # already init inside span attention layer

        # for the output!
        self.output_layer_norm = BertLayerNorm(self.contextual_embedding_dim, eps=1e-5)

        self.kg_to_bert_projection = torch.nn.Linear(
                self.entity_embedding_dim, self.contextual_embedding_dim
        )

        self.should_init_kg_to_bert_inverse = should_init_kg_to_bert_inverse
        self._init_kg_to_bert_projection()

        self._freeze_all = freeze 
Developer: allenai, Project: kb, Lines: 43, Source: knowbert.py
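In Example 5, the assert len(span_attention_config) == 4 expects exactly the four keys read just below it. A hypothetical dict of that shape (the values are illustrative, not taken from allenai/kb):

span_attention_config = {
    'hidden_size': 300,
    'num_hidden_layers': 1,
    'num_attention_heads': 4,
    'intermediate_size': 1024,
}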

Example 6: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses BertEmbeddings from pytorch_pretrained_bert.modeling and the project's SanEncoder and SanPooler
def __init__(self, config: BertConfig):
        super().__init__()
        self.embeddings = BertEmbeddings(config)
        self.encoder = SanEncoder(
            config.hidden_size,
            config.num_hidden_layers,
            True,
            config.hidden_dropout_prob,
        )
        self.pooler = SanPooler(config.hidden_size, config.hidden_dropout_prob)
        self.config = config 
Developer: microsoft, Project: MT-DNN, Lines: 13, Source: san_model.py

Example 7: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertConfig [as alias]
# The snippet also uses BertEmbeddings from pytorch_pretrained_bert.modeling and the project's SanEncoder and SanPooler
def __init__(self, config: BertConfig):
        super().__init__()
        self.embeddings = BertEmbeddings(config)
        self.encoder = SanEncoder(config.hidden_size, config.num_hidden_layers, True, 
                                  config.hidden_dropout_prob)
        self.pooler = SanPooler(config.hidden_size, config.hidden_dropout_prob)
        self.config = config 
Developer: namisan, Project: mt-dnn, Lines: 9, Source: san_model.py


Note: The pytorch_pretrained_bert.modeling.BertConfig examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are excerpted from open-source projects contributed by their original authors, who retain all copyrights; consult the corresponding project's license before using or redistributing the code. Please do not reproduce this article without permission.