

Python BertModel.from_pretrained Method Code Examples

This article collects and summarizes typical usage examples of the Python method transformers.BertModel.from_pretrained. If you have been wondering what exactly BertModel.from_pretrained does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the class it belongs to, transformers.BertModel.


The following presents 12 code examples of the BertModel.from_pretrained method, sorted by popularity by default.
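Before diving into the examples, here is a minimal, self-contained sketch of the basic call pattern that all of them build on. It assumes the standard 'bert-base-uncased' weights can be downloaded (or are already cached); the examples below embed the same from_pretrained call inside larger models.

import torch
from transformers import BertModel, BertTokenizer

# Load the pretrained weights and the matching tokenizer
# (downloaded to the default cache directory on first use).
model = BertModel.from_pretrained('bert-base-uncased')
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Encode a sentence and run a forward pass without gradients.
inputs = tokenizer("BERT in one minimal example.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# outputs[0] holds the last hidden states: (batch, seq_len, hidden_size).
print(outputs[0].shape, model.config.hidden_size)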

Example 1: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, pretrained_model_name_or_path=None, cache_dir=None,
                 finetune_ebd=False, return_seq=False):
        '''
            pretrained_model_name_or_path, cache_dir: see Hugging Face's codebase for details
            finetune_ebd: whether to fine-tune the BERT representation during meta-training
            return_seq: return the full sequence of BERT representations, or only the [CLS] representation
        '''
        super(CXTEBD, self).__init__()

        self.finetune_ebd = finetune_ebd

        self.return_seq = return_seq

        print("{}, Loading pretrained bert".format(
            datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S')), flush=True)

        self.model = BertModel.from_pretrained(pretrained_model_name_or_path,
                                               cache_dir=cache_dir)

        self.embedding_dim = self.model.config.hidden_size
        self.ebd_dim = self.model.config.hidden_size 
Author: YujiaBao, Project: Distributional-Signatures, Lines of code: 24, Source file: cxtebd.py

Example 2: main

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) 
Author: bhoov, Project: exbert, Lines of code: 19, Source file: convert_bert_pytorch_checkpoint_to_original_tf.py
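For reference, the conversion script above can also be driven programmatically by passing an argument list to main(); the paths below are hypothetical placeholders, not values from the original project.

# Hypothetical invocation; substitute your own checkpoint and output paths.
main([
    "--model_name", "bert-base-uncased",
    "--pytorch_model_path", "/path/to/pytorch_model.bin",
    "--tf_cache_dir", "/path/to/tf_export_dir",
])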

Example 3: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, temp_dir, model_class, pretrained_model_name, pretrained_config):
        super(Transformer, self).__init__()
        if(pretrained_model_name):
            self.model = model_class.from_pretrained(pretrained_model_name,
                                                   cache_dir=temp_dir)
            #self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)
        else:
            self.model = model_class(pretrained_config) 
Author: microsoft, Project: nlp-recipes, Lines of code: 10, Source file: model_builder.py

Example 4: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, max_length, pretrain_path, blank_padding=True, mask_entity=False):
        """
        Args:
            max_length: max length of sentence
            pretrain_path: path of pretrain model
        """
        super().__init__()
        self.max_length = max_length
        self.blank_padding = blank_padding
        self.hidden_size = 768
        self.mask_entity = mask_entity
        logging.info('Loading BERT pre-trained checkpoint.')
        self.bert = BertModel.from_pretrained(pretrain_path)
        self.tokenizer = BertTokenizer.from_pretrained(pretrain_path) 
Author: thunlp, Project: OpenNRE, Lines of code: 16, Source file: bert_encoder.py

Example 5: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, model_config, device, slot_dim, intent_dim, intent_weight=None):
        super(JointBERT, self).__init__()
        self.slot_num_labels = slot_dim
        self.intent_num_labels = intent_dim
        self.device = device
        self.intent_weight = intent_weight if intent_weight is not None else torch.tensor([1.]*intent_dim)

        self.bert = BertModel.from_pretrained(model_config['pretrained_weights'])
        self.dropout = nn.Dropout(model_config['dropout'])
        self.context = model_config['context']
        self.finetune = model_config['finetune']
        self.context_grad = model_config['context_grad']
        if self.context:
            self.intent_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.intent_num_labels)
            self.slot_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.slot_num_labels)
            self.intent_hidden = nn.Linear(2 * self.bert.config.hidden_size, 2 * self.bert.config.hidden_size)
            self.slot_hidden = nn.Linear(2 * self.bert.config.hidden_size, 2 * self.bert.config.hidden_size)
        else:
            self.intent_classifier = nn.Linear(self.bert.config.hidden_size, self.intent_num_labels)
            self.slot_classifier = nn.Linear(self.bert.config.hidden_size, self.slot_num_labels)
            self.intent_hidden = nn.Linear(self.bert.config.hidden_size, self.bert.config.hidden_size)
            self.slot_hidden = nn.Linear(self.bert.config.hidden_size, self.bert.config.hidden_size)

        nn.init.xavier_uniform_(self.intent_hidden.weight)
        nn.init.xavier_uniform_(self.slot_hidden.weight)
        nn.init.xavier_uniform_(self.intent_classifier.weight)
        nn.init.xavier_uniform_(self.slot_classifier.weight)

        self.intent_loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=self.intent_weight)
        self.slot_loss_fct = torch.nn.CrossEntropyLoss() 
Author: ConvLab, Project: ConvLab, Lines of code: 32, Source file: jointBERT.py

Example 6: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, model_name_or_path: str, max_seq_length: int = 128, do_lower_case: Optional[bool] = None, model_args: Dict = {}, tokenizer_args: Dict = {}):
        super(BERT, self).__init__()
        self.config_keys = ['max_seq_length', 'do_lower_case']
        self.do_lower_case = do_lower_case

        if max_seq_length > 510:
            logging.warning("BERT only allows a max_seq_length of 510 (512 with special tokens). Value will be set to 510")
            max_seq_length = 510
        self.max_seq_length = max_seq_length

        if self.do_lower_case is not None:
            tokenizer_args['do_lower_case'] = do_lower_case

        self.bert = BertModel.from_pretrained(model_name_or_path, **model_args)
        self.tokenizer = BertTokenizer.from_pretrained(model_name_or_path, **tokenizer_args) 
Author: UKPLab, Project: sentence-transformers, Lines of code: 17, Source file: BERT.py

Example 7: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, model, n_layers, n_out, requires_grad=False):
        super(BertEmbedding, self).__init__()

        self.bert = BertModel.from_pretrained(model, output_hidden_states=True)
        self.bert = self.bert.requires_grad_(requires_grad)
        self.n_layers = n_layers
        self.n_out = n_out
        self.requires_grad = requires_grad
        self.hidden_size = self.bert.config.hidden_size

        self.scalar_mix = ScalarMix(n_layers)
        self.projection = nn.Linear(self.hidden_size, n_out, False) 
Author: yzhangcs, Project: parser, Lines of code: 14, Source file: bert.py

Example 8: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, model_config, device, slot_dim, intent_dim, intent_weight=None):
        super(JointBERT, self).__init__()
        self.slot_num_labels = slot_dim
        self.intent_num_labels = intent_dim
        self.device = device
        self.intent_weight = intent_weight if intent_weight is not None else torch.tensor([1.]*intent_dim)

        self.bert = BertModel.from_pretrained(model_config['pretrained_weights'])
        self.dropout = nn.Dropout(model_config['dropout'])
        self.context = model_config['context']
        self.finetune = model_config['finetune']
        self.context_grad = model_config['context_grad']
        self.hidden_units = model_config['hidden_units']
        if self.hidden_units > 0:
            if self.context:
                self.intent_classifier = nn.Linear(self.hidden_units, self.intent_num_labels)
                self.slot_classifier = nn.Linear(self.hidden_units, self.slot_num_labels)
                self.intent_hidden = nn.Linear(2 * self.bert.config.hidden_size, self.hidden_units)
                self.slot_hidden = nn.Linear(2 * self.bert.config.hidden_size, self.hidden_units)
            else:
                self.intent_classifier = nn.Linear(self.hidden_units, self.intent_num_labels)
                self.slot_classifier = nn.Linear(self.hidden_units, self.slot_num_labels)
                self.intent_hidden = nn.Linear(self.bert.config.hidden_size, self.hidden_units)
                self.slot_hidden = nn.Linear(self.bert.config.hidden_size, self.hidden_units)
            nn.init.xavier_uniform_(self.intent_hidden.weight)
            nn.init.xavier_uniform_(self.slot_hidden.weight)
        else:
            if self.context:
                self.intent_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.intent_num_labels)
                self.slot_classifier = nn.Linear(2 * self.bert.config.hidden_size, self.slot_num_labels)
            else:
                self.intent_classifier = nn.Linear(self.bert.config.hidden_size, self.intent_num_labels)
                self.slot_classifier = nn.Linear(self.bert.config.hidden_size, self.slot_num_labels)
        nn.init.xavier_uniform_(self.intent_classifier.weight)
        nn.init.xavier_uniform_(self.slot_classifier.weight)

        self.intent_loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=self.intent_weight)
        self.slot_loss_fct = torch.nn.CrossEntropyLoss() 
Author: thu-coai, Project: tatk, Lines of code: 40, Source file: jointBERT.py

Example 9: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(
            self,
            class_size=None,
            pretrained_model="gpt2-medium",
            classifier_head=None,
            cached_mode=False,
            device='cpu'
    ):
        super(Discriminator, self).__init__()
        if pretrained_model.startswith("gpt2"):
            self.tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)
            self.encoder = GPT2LMHeadModel.from_pretrained(pretrained_model)
            self.embed_size = self.encoder.transformer.config.hidden_size
        elif pretrained_model.startswith("bert"):
            self.tokenizer = BertTokenizer.from_pretrained(pretrained_model)
            self.encoder = BertModel.from_pretrained(pretrained_model)
            self.embed_size = self.encoder.config.hidden_size
        else:
            raise ValueError(
                "{} model not yet supported".format(pretrained_model)
            )
        if classifier_head:
            self.classifier_head = classifier_head
        else:
            if not class_size:
                raise ValueError("must specify class_size")
            self.classifier_head = ClassificationHead(
                class_size=class_size,
                embed_size=self.embed_size
            )
        self.cached_mode = cached_mode
        self.device = device 
Author: uber-research, Project: PPLM, Lines of code: 34, Source file: run_pplm_discrim_train.py
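As a quick illustration of the BERT branch above, the discriminator can be instantiated with a bert-* model name. This sketch assumes the ClassificationHead class and the imports defined elsewhere in run_pplm_discrim_train.py are available, and the class_size value is arbitrary.

# Hypothetical instantiation exercising the "bert" branch; ClassificationHead
# is defined in the same source file and builds the classification layer.
discriminator = Discriminator(
    class_size=5,                          # arbitrary number of target classes
    pretrained_model="bert-base-uncased",  # routes to BertModel.from_pretrained
    device="cpu",
)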

Example 10: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, config: BertEmbeddingLayerConfig):
        super(BertEmbeddingLayer, self).__init__(config)
        self.embedding = BertModel.from_pretrained(self.config.model_dir) 
Author: luopeixiang, Project: textclf, Lines of code: 5, Source file: embedding_layer.py

Example 11: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, pretrain_path, max_length, cat_entity_rep=False): 
        nn.Module.__init__(self)
        self.bert = BertModel.from_pretrained(pretrain_path)
        self.max_length = max_length
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.cat_entity_rep = cat_entity_rep 
Author: thunlp, Project: FewRel, Lines of code: 8, Source file: sentence_encoder.py

Example 12: __init__

# Required import: from transformers import BertModel [as alias]
# Or: from transformers.BertModel import from_pretrained [as alias]
def __init__(self, max_length, pretrain_path, blank_padding=True):
        """
        Args:
            max_length: max length of sentence
            pretrain_path: path of pretrain model
        """
        super().__init__()
        self.max_length = max_length
        self.blank_padding = blank_padding
        self.bert = BertModel.from_pretrained(pretrain_path)
        self.hidden_size = self.bert.config.hidden_size
        self.tokenizer = BertTokenizer.from_pretrained(pretrain_path) 
Author: slczgwh, Project: REDN, Lines of code: 14, Source file: bert_encoder.py


Note: The transformers.BertModel.from_pretrained examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their original developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.