

Python modeling.BertModel Code Examples

This article collects typical usage examples of pytorch_pretrained_bert.modeling.BertModel in Python. If you are wondering what modeling.BertModel does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from the pytorch_pretrained_bert.modeling module.


Fifteen code examples of modeling.BertModel are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site surface better Python examples.
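
Before the individual examples, here is a minimal sketch of the call pattern they all build on, assuming the pytorch_pretrained_bert package is installed and the 'bert-base-uncased' weights can be downloaded:

import torch
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel

# Load the pre-trained tokenizer and encoder (weights are fetched on first use).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
model.eval()

tokens = tokenizer.tokenize('[CLS] hello world [SEP]')
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])

with torch.no_grad():
    # With output_all_encoded_layers=True (the default) the first return value
    # is a list with one tensor per transformer layer; with False it is only
    # the final layer: [batch_size, seq_len, hidden_size].
    encoded_layers, pooled_output = model(input_ids,
                                          output_all_encoded_layers=False)

print(encoded_layers.shape)  # expected: torch.Size([1, 4, 768]) for bert-base
print(pooled_output.shape)   # expected: torch.Size([1, 768])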

Example 1: forward

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def forward(self, input_ids,
                token_type_ids=None,
                attention_mask=None,
                output_all_encoded_layers=True,
                token_subword_index=None):
        """
        :param input_ids: same as it in BertModel
        :param token_type_ids: same as it in BertModel
        :param attention_mask: same as it in BertModel
        :param output_all_encoded_layers: same as it in BertModel
        :param token_subword_index: [batch_size, num_tokens, num_subwords]
        :return:
        """
        # encoded_layers: [batch_size, num_subword_pieces, hidden_size]
        encoded_layers, pooled_output = super(Seq2SeqBertEncoder, self).forward(
            input_ids, token_type_ids, attention_mask, output_all_encoded_layers)
        if token_subword_index is None:
            return encoded_layers, pooled_output
        else:
            return self.average_pooling(encoded_layers, token_subword_index), pooled_output 
Developer: jcyk, Project: gtos, Lines: 22, Source: seq2seq_bert_encoder.py
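
Example 1 delegates to an average_pooling helper that the snippet does not show. Below is a minimal, hypothetical sketch of such a helper, assuming encoded_layers is a single [batch_size, num_subwords, hidden_size] tensor and that index 0 in token_subword_index marks padding; the project's actual implementation may differ.

def average_pooling(self, encoded_layers, token_subword_index):
    """Average the subword vectors of each token into one token vector.

    encoded_layers:      [batch_size, num_subwords, hidden_size]
    token_subword_index: [batch_size, num_tokens, num_subwords_per_token], 0 = padding
    returns:             [batch_size, num_tokens, hidden_size]
    """
    batch_size, num_tokens, num_subwords = token_subword_index.size()
    batch_index = torch.arange(batch_size, device=token_subword_index.device)
    batch_index = batch_index.view(-1, 1, 1).expand(-1, num_tokens, num_subwords)
    # Advanced indexing gathers the hidden state of every subword of every token:
    # shape [batch_size, num_tokens, num_subwords_per_token, hidden_size].
    subword_vectors = encoded_layers[batch_index, token_subword_index]
    mask = (token_subword_index != 0).unsqueeze(-1).float()
    summed = (subword_vectors * mask).sum(dim=2)
    counts = mask.sum(dim=2).clamp(min=1.0)
    return summed / counts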

Example 2: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BertCNNForTripletNet, self).__init__(config)

        filters = [3, 4, 5]

        self.bert = BertModel(config)
        self.embedding_dropout = SpatialDropout1D(config.hidden_dropout_prob)

        self.conv_layers = nn.ModuleList()
        for filter_size in filters:
            conv_block = nn.Sequential(
                nn.Conv1d(
                    config.hidden_size,
                    CHANNEL_UNITS,
                    kernel_size=filter_size,
                    padding=1,
                ),
                # nn.BatchNorm1d(CHANNEL_UNITS),
                # nn.ReLU(inplace=True),
            )
            self.conv_layers.append(conv_block)

        self.apply(self.init_bert_weights) 
Developer: GuidoPaul, Project: CAIL2019, Lines: 25, Source: net.py
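
The snippet above only defines the layers. nn.Conv1d expects input of shape [batch, channels, length], while BertModel returns [batch, seq_len, hidden_size], so a forward pass would typically permute the tensor before the convolutions. The method below is a hypothetical sketch only: the triplet head is omitted, the method name is invented, and whether SpatialDropout1D expects channel-first or channel-last input depends on its implementation.

def encode(self, input_ids, token_type_ids=None, attention_mask=None):
    # sequence_output: [batch_size, seq_len, hidden_size]
    sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
    sequence_output = self.embedding_dropout(sequence_output)
    # Conv1d wants [batch_size, hidden_size, seq_len].
    conv_input = sequence_output.permute(0, 2, 1)
    # Max-pool each convolution over the sequence dimension, then concatenate.
    pooled = [conv(conv_input).max(dim=-1)[0] for conv in self.conv_layers]
    return torch.cat(pooled, dim=-1)  # [batch_size, len(filters) * CHANNEL_UNITS]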

Example 3: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, answer_verification=True, hidden_dropout_prob=0.3):
        super(CailModel, self).__init__(config)
        self.bert = BertModel(config)
        # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
        # self.qa_dropout = nn.Dropout(config.hidden_dropout_prob)
        self.qa_outputs = nn.Linear(config.hidden_size*4, 2)
        self.apply(self.init_bert_weights)
        self.answer_verification = answer_verification
        if self.answer_verification:
            self.retionale_outputs = nn.Linear(config.hidden_size*4, 1)
            self.unk_ouputs = nn.Linear(config.hidden_size, 1)
            self.doc_att = nn.Linear(config.hidden_size*4, 1)
            self.yes_no_ouputs = nn.Linear(config.hidden_size*4, 2)
            self.ouputs_cls_3 = nn.Linear(config.hidden_size*4, 3)

            self.beta = 100
        else:
            # self.unk_yes_no_outputs_dropout = nn.Dropout(config.hidden_dropout_prob)
            self.unk_yes_no_outputs = nn.Linear(config.hidden_size, 3) 
Developer: NoneWait, Project: cail2019, Lines: 21, Source: CailModel.py

Example 4: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BertForQuestionAnswering, self).__init__(config)
        self.bert = BertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 1)
        self.apply(self.init_bert_weights) 
Developer: ymcui, Project: cmrc2019, Lines: 7, Source: run_cmrc2019_baseline.py

Example 5: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BertQA, self).__init__(config)
        self.bert = BertModel(config)

        self.qa_outputs = nn.Linear(config.hidden_size, 2)
        self.apply(self.init_bert_weights) 
Developer: pranciskus, Project: mrc-for-flat-nested-ner, Lines: 8, Source: bert_qa.py
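
The two output units of qa_outputs correspond to the standard start/end span-extraction head. A hedged sketch of the forward pass such a head is usually paired with (not necessarily this project's exact code):

def forward(self, input_ids, token_type_ids=None, attention_mask=None):
    sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
    logits = self.qa_outputs(sequence_output)        # [batch_size, seq_len, 2]
    start_logits, end_logits = logits.split(1, dim=-1)
    return start_logits.squeeze(-1), end_logits.squeeze(-1)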

Example 6: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, num_entity_labels):
        super(BertForBasicNER, self).__init__(config)
        self.bert = BertModel(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_entity_labels)
        self.apply(self.init_bert_weights)

        self.num_entity_labels = num_entity_labels 
Developer: dolphin-zs, Project: Doc2EDAG, Lines: 11, Source: ner_model.py
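
For token-level classification it is the per-token hidden states, not the pooled output, that feed the classifier. A minimal sketch of a matching forward pass (illustrative only; the project's actual forward may add label masking or a CRF on top):

def forward(self, input_ids, token_type_ids=None, attention_mask=None):
    sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
                                   output_all_encoded_layers=False)
    sequence_output = self.dropout(sequence_output)
    # One score per entity label for every token position.
    logits = self.classifier(sequence_output)  # [batch_size, seq_len, num_entity_labels]
    return logits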

Example 7: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config) 
Developer: ConvLab, Project: ConvLab, Lines: 7, Source: BeliefTrackerSlotQueryMultiSlot.py

Example 8: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BERT_classifer, self).__init__(config)

        self.num_labels = NUM_EMO
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(0.1)
        self.apply(self.init_bert_weights)
        self.bert_out_dim = None
        self.out2label = None
        self.out2binary = None
        self.out2emo = None 
Developer: chenyangh, Project: SemEval2019Task3, Lines: 13, Source: bert.py

Example 9: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(1*config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
Developer: sattree, Project: gap, Lines: 12, Source: probert.py

Example 10: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, num_labels):
        super().__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)

        self.pooler = BertPooler(config)

        self.evidence_pooler_p = EvidencePooler(config)

        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(2 * config.hidden_size, num_labels)

        torch.nn.init.xavier_uniform_(self.classifier.weight)

        self.apply(self.init_bert_weights) 
Developer: sattree, Project: gap, Lines: 17, Source: grep.py

Example 11: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, opt, bert_config=None):
        super(SANBertNetwork, self).__init__()
        self.dropout_list = nn.ModuleList()
        self.bert_config = BertConfig.from_dict(opt)
        self.bert = BertModel(self.bert_config)
        if opt['update_bert_opt'] > 0:
            for p in self.bert.parameters():
                p.requires_grad = False
        mem_size = self.bert_config.hidden_size
        self.decoder_opt = opt['answer_opt']
        self.scoring_list = nn.ModuleList()
        labels = [int(ls) for ls in opt['label_size'].split(',')]
        task_dropout_p = opt['tasks_dropout_p']
        self.bert_pooler = None

        for task, lab in enumerate(labels):
            decoder_opt = self.decoder_opt[task]
            dropout = DropoutWrapper(task_dropout_p[task], opt['vb_dropout'])
            self.dropout_list.append(dropout)
            if decoder_opt == 1:
                out_proj = SANClassifier(mem_size, mem_size, lab, opt, prefix='answer', dropout=dropout)
                self.scoring_list.append(out_proj)
            else:
                out_proj = nn.Linear(self.bert_config.hidden_size, lab)
                self.scoring_list.append(out_proj)

        self.opt = opt
        self._my_init()
        self.set_embed(opt) 
Developer: RTIInternational, Project: gobbli, Lines: 31, Source: matcher.py

Example 12: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, trainable=False):
        super(BertForLabelEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config)
        #self.apply(self.init_bert_weights)     # don't need to perform due to pre-trained params loading

        if not trainable:
            for p in self.bert.parameters():
                p.requires_grad = False 
Developer: SKTBrain, Project: SUMBT, Lines: 12, Source: BertForLabelEncoding.py

Example 13: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(BertForUtteranceEncoding, self).__init__(config)

        self.config = config
        self.bert = BertModel(config)
        #self.apply(self.init_bert_weights)     # don't need to perform due to pre-trained params loading 
Developer: SKTBrain, Project: SUMBT, Lines: 8, Source: BertForUtteranceEncoding.py

Example 14: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config):
        super(DMBERT_Encoder, self).__init__(config)
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(p=keepProb)
        #self.M=nn.Linear(EncodedDim,dimE)
        self.maxpooling = nn.MaxPool1d(SenLen) 
Developer: thunlp, Project: Adv-ED, Lines: 8, Source: models.py

Example 15: __init__

# Required import: from pytorch_pretrained_bert import modeling [as alias]
# Or: from pytorch_pretrained_bert.modeling import BertModel [as alias]
def __init__(self, config, num_labels=3):
        super(BertForSequenceLabeling, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights) 
Developer: howardhsu, Project: BERT-for-RRC-ABSA, Lines: 9, Source: run_ae.py


Note: The pytorch_pretrained_bert.modeling.BertModel examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets come from open-source projects and remain copyrighted by their original authors; consult each project's license before redistributing or reusing the code. Do not republish this article without permission.