

Python transformers.BertModel Usage Examples

This article collects typical usage examples of the Python transformers.BertModel class. If you are wondering what transformers.BertModel does, how it is used, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the transformers package.


Six code examples of transformers.BertModel are presented below, sorted by popularity by default.
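Before the project-specific examples, here is a minimal, self-contained sketch of the typical BertModel workflow: load a pre-trained checkpoint, tokenize a sentence, and run a forward pass. It is not taken from any of the projects below, and it assumes a transformers version in which the tokenizer object is callable; the sample sentence is just a placeholder.

import torch
from transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
model.eval()                                   # inference mode: disables dropout

inputs = tokenizer("Hello, BERT!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
sequence_output = outputs[0]                   # (batch, seq_len, hidden_size) token states
pooled_output = outputs[1]                     # (batch, hidden_size) pooled [CLS] vector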

Example 1: __init__

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def __init__(self, config):
        super().__init__(config, num_labels=config.num_labels)
        self.bert = BertModel(config)                           # BERT encoder backbone
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.init_weights()
Developer: castorini, Project: hedwig, Lines of code: 7, Source: sentence_encoder.py
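This is the common pattern for putting a task-specific head on top of BERT inside a pre-trained-model subclass: keep a BertModel backbone plus a dropout layer and call self.init_weights() to initialize the parameters; the pre-trained encoder weights are filled in later when the subclass is loaded with from_pretrained.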

Example 2: __init__

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def __init__(self):
        super().__init__()
        config = BertConfig.from_pretrained("bert-base-uncased")
        self.model = BertModel(config) 
Developer: bhoov, Project: exbert, Lines of code: 6, Source: modeling_bertabs.py
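Note that BertModel(config) only builds the architecture with randomly initialized weights; pre-trained weights are obtained either with BertModel.from_pretrained(...) (as in Example 5) or by loading a checkpoint state dict into the model afterwards.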

Example 3: __init__

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def __init__(self):
        super(Bert, self).__init__()
        config = BertConfig.from_pretrained("bert-base-uncased")
        self.model = BertModel(config) 
Developer: kaushaltrivedi, Project: fast-bert, Lines of code: 6, Source: modeling_bertabs.py

Example 4: make_model

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6, 
               d_model=512, d_ff=2048, h=8, dropout=0.1):
        "Helper: Construct a model from hyperparameters."
        enc_config = BertConfig(vocab_size=1,
                                hidden_size=d_model,
                                num_hidden_layers=N_enc,
                                num_attention_heads=h,
                                intermediate_size=d_ff,
                                hidden_dropout_prob=dropout,
                                attention_probs_dropout_prob=dropout,
                                max_position_embeddings=1,
                                type_vocab_size=1)
        dec_config = BertConfig(vocab_size=tgt_vocab,
                                hidden_size=d_model,
                                num_hidden_layers=N_dec,
                                num_attention_heads=h,
                                intermediate_size=d_ff,
                                hidden_dropout_prob=dropout,
                                attention_probs_dropout_prob=dropout,
                                max_position_embeddings=17,
                                type_vocab_size=1,
                                is_decoder=True)  # run BERT as a decoder (causal self-attention over target tokens)
        encoder = BertModel(enc_config)
        # Bypass the encoder's token embeddings: features are passed straight
        # through via the `inputs_embeds` keyword argument.
        def return_embeds(*args, **kwargs):
            return kwargs['inputs_embeds']
        # delete the registered embeddings module so a plain function can replace it
        del encoder.embeddings
        encoder.embeddings = return_embeds
        decoder = BertModel(dec_config)
        model = EncoderDecoder(
            encoder,
            decoder,
            Generator(d_model, tgt_vocab))
        return model 
Developer: ruotianluo, Project: self-critical.pytorch, Lines of code: 34, Source: BertCapModel.py
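Because the embedding module is replaced by return_embeds, the encoder consumes pre-computed feature vectors directly through the inputs_embeds argument. The following rough sketch rebuilds such a patched encoder and calls it; the feature tensors are random placeholders (not from the original project), and the exact forward signature may vary slightly between transformers versions.

import torch
from transformers import BertConfig, BertModel

d_model = 512
enc_config = BertConfig(vocab_size=1, hidden_size=d_model, num_hidden_layers=2,
                        num_attention_heads=8, intermediate_size=2048,
                        max_position_embeddings=1, type_vocab_size=1)
encoder = BertModel(enc_config)

def return_embeds(*args, **kwargs):
    return kwargs['inputs_embeds']          # skip token/position embeddings entirely
del encoder.embeddings
encoder.embeddings = return_embeds

att_feats = torch.randn(2, 10, d_model)             # (batch, num_regions, d_model) feature vectors
att_masks = torch.ones(2, 10, dtype=torch.long)      # attention mask over the regions
memory = encoder(inputs_embeds=att_feats, attention_mask=att_masks)[0]
print(memory.shape)                                   # torch.Size([2, 10, 512])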

Example 5: load

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def load(self):
        self.model = transformers.BertModel.from_pretrained(self.load_path, config=self.config).eval().to(self.device)
        self.dim = self.model.config.hidden_size 
Developer: deepmipt, Project: DeepPavlov, Lines of code: 5, Source: transformers_embedder.py
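Here .eval() switches the loaded BERT to inference mode (dropout disabled), and config.hidden_size is stored as self.dim so that downstream components know the dimensionality of the embeddings this model produces.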

Example 6: __init__

# Required import: import transformers [as alias]
# Or: from transformers import BertModel [as alias]
def __init__(self, bert_config):
        """

        :param bert_config: configuration for bert model
        """
        super(BertABSATagger, self).__init__(bert_config)
        self.num_labels = bert_config.num_labels
        self.tagger_config = TaggerConfig()
        self.tagger_config.absa_type = bert_config.absa_type.lower()
        if bert_config.tfm_mode == 'finetune':
            # initialized with pre-trained BERT and perform finetuning
            # print("Fine-tuning the pre-trained BERT...")
            self.bert = BertModel(bert_config)
        else:
            raise Exception("Invalid transformer mode %s!!!" % bert_config.tfm_mode)
        self.bert_dropout = nn.Dropout(bert_config.hidden_dropout_prob)
        # fix the parameters in BERT and regard it as feature extractor
        if bert_config.fix_tfm:
            # fix the parameters of the (pre-trained or randomly initialized) transformers during fine-tuning
            for p in self.bert.parameters():
                p.requires_grad = False

        self.tagger = None
        if self.tagger_config.absa_type == 'linear':
            # hidden size at the penultimate layer
            penultimate_hidden_size = bert_config.hidden_size
        else:
            self.tagger_dropout = nn.Dropout(self.tagger_config.hidden_dropout_prob)
            if self.tagger_config.absa_type == 'lstm':
                self.tagger = LSTM(input_size=bert_config.hidden_size,
                                   hidden_size=self.tagger_config.hidden_size,
                                   bidirectional=self.tagger_config.bidirectional)
            elif self.tagger_config.absa_type == 'gru':
                self.tagger = GRU(input_size=bert_config.hidden_size,
                                  hidden_size=self.tagger_config.hidden_size,
                                  bidirectional=self.tagger_config.bidirectional)
            elif self.tagger_config.absa_type == 'tfm':
                # transformer encoder layer
                self.tagger = nn.TransformerEncoderLayer(d_model=bert_config.hidden_size,
                                                         nhead=12,
                                                         dim_feedforward=4*bert_config.hidden_size,
                                                         dropout=0.1)
            elif self.tagger_config.absa_type == 'san':
                # vanilla self attention networks
                self.tagger = SAN(d_model=bert_config.hidden_size, nhead=12, dropout=0.1)
            elif self.tagger_config.absa_type == 'crf':
                self.tagger = CRF(num_tags=self.num_labels)
            else:
                raise Exception('Unimplemented downstream tagger %s...' % self.tagger_config.absa_type)
            penultimate_hidden_size = self.tagger_config.hidden_size
        self.classifier = nn.Linear(penultimate_hidden_size, bert_config.num_labels) 
Developer: lixin4ever, Project: BERT-E2E-ABSA, Lines of code: 53, Source: absa_layer.py
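For illustration, here is a hedged sketch of how a tagger like this is typically instantiated. It assumes, as the super().__init__ call suggests, that BertABSATagger subclasses the library's pre-trained model base class; the extra attributes are set on the config before construction, and the concrete values are placeholders.

from transformers import BertConfig

config = BertConfig.from_pretrained("bert-base-uncased", num_labels=5)
config.absa_type = "linear"      # or 'lstm', 'gru', 'tfm', 'san', 'crf'
config.tfm_mode = "finetune"     # initialize from pre-trained BERT and fine-tune
config.fix_tfm = False           # set True to freeze the BERT parameters
model = BertABSATagger.from_pretrained("bert-base-uncased", config=config)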


Note: The transformers.BertModel examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.