

Python BertModel.from_pretrained Method Code Examples

This article collects typical usage examples of the Python method pytorch_transformers.BertModel.from_pretrained. If you are wondering what BertModel.from_pretrained does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, pytorch_transformers.BertModel.


The sections below present 11 code examples of BertModel.from_pretrained, ordered by popularity by default.
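Before the numbered examples, here is a minimal, self-contained sketch of the call they all build on (our own illustration, not taken from the projects below; it assumes pytorch_transformers 1.x and the publicly available 'bert-base-uncased' weights):

import torch
from pytorch_transformers import BertModel, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
model.eval()  # inference mode: disables dropout

# Wrap one sentence in [CLS]/[SEP] markers and run a forward pass.
tokens = ['[CLS]'] + tokenizer.tokenize("Hello, BERT!") + ['[SEP]']
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
with torch.no_grad():
    sequence_output, pooled_output = model(input_ids)[:2]

print(sequence_output.shape)  # torch.Size([1, seq_len, 768])
print(pooled_output.shape)    # torch.Size([1, 768])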

Example 1: __init__

# Required import: from pytorch_transformers import BertModel
# (this snippet also uses torch.nn as nn and claf's CachePath)
def __init__(
        self, token_makers, num_tags, ignore_tag_idx, pretrained_model_name=None, dropout=0.2
    ):

        super(BertForTokCls, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        self.ignore_tag_idx = ignore_tag_idx
        self.num_tags = num_tags

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, num_tags)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_tag_idx) 
Developer: naver | Project: claf | Lines: 22 | Source: bert.py
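For context, a plausible forward pass for this token-classification head (our sketch, not claf's actual implementation; argument names are illustrative):

def forward(self, input_ids, attention_mask=None, labels=None):
    # sequence_output: (batch, seq_len, hidden_size), first element of the output tuple
    sequence_output = self._model(input_ids, attention_mask=attention_mask)[0]
    logits = self.classifier(sequence_output)  # (batch, seq_len, num_tags)
    if labels is None:
        return logits
    # Flatten so CrossEntropyLoss compares (N, C) against (N,); positions
    # labeled ignore_tag_idx (e.g. padding) contribute nothing to the loss.
    loss = self.criterion(logits.view(-1, self.num_tags), labels.view(-1))
    return loss, logits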

Example 2: __init__

# Required import: from pytorch_transformers import BertModel
# (this snippet also uses torch.nn as nn and claf's CachePath)
def __init__(self, token_makers, num_classes, pretrained_model_name=None, dropout=0.2):

        super(BertForSeqCls, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        self.num_classes = num_classes

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, num_classes)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.CrossEntropyLoss() 
Developer: naver | Project: claf | Lines: 19 | Source: bert.py
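The sequence-classification variant would instead classify the pooled [CLS] representation, which BertModel returns as the second element of its output tuple (again our sketch, not claf's code):

def forward(self, input_ids, attention_mask=None, labels=None):
    pooled_output = self._model(input_ids, attention_mask=attention_mask)[1]
    logits = self.classifier(pooled_output)  # (batch, num_classes)
    if labels is None:
        return logits
    return self.criterion(logits, labels), logits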

Example 3: __init__

# Required import: from pytorch_transformers import BertModel
# (this snippet also uses claf's CachePath)
def __init__(self, token_makers, tasks, pretrained_model_name=None, dropouts=None):
        super(BertForMultiTask, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters
        self.tasks = tasks

        assert len(tasks) == len(dropouts)

        self.curr_task_category = None
        self.curr_dataset = None

        self.shared_layers = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self._init_task_layers(tasks, dropouts)
        self._init_criterions(tasks) 
Developer: naver | Project: claf | Lines: 18 | Source: bert.py
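The helper below shows one hypothetical shape _init_task_layers could take: one dropout-plus-linear head per task on top of the shared encoder (purely illustrative; the "num_classes" key is our assumption, not claf's actual task schema):

def _init_task_layers(self, tasks, dropouts):
    hidden_size = self.shared_layers.config.hidden_size
    self.task_heads = nn.ModuleList([
        nn.Sequential(nn.Dropout(p), nn.Linear(hidden_size, task["num_classes"]))
        for task, p in zip(tasks, dropouts)
    ])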

Example 4: __init__

# Required import: from pytorch_transformers import BertModel
# (this snippet also uses torch.nn as nn and claf's CachePath)
def __init__(self, token_makers, pretrained_model_name=None, dropout=0.2):

        super(BertForRegression, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        NUM_CLASSES = 1

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, NUM_CLASSES)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.MSELoss() 
Developer: naver | Project: claf | Lines: 19 | Source: bert.py
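A matching forward pass would squeeze the single output unit into a scalar prediction per example (our sketch, not claf's code):

def forward(self, input_ids, attention_mask=None, targets=None):
    pooled_output = self._model(input_ids, attention_mask=attention_mask)[1]
    predictions = self.classifier(pooled_output).squeeze(-1)  # (batch,)
    if targets is None:
        return predictions
    return self.criterion(predictions, targets.float()), predictions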

Example 5: __init__

# Required import: from pytorch_transformers import BertModel
# (this snippet also uses torch.nn as nn)
def __init__(self, device, config, index_to_tag, tag_to_index):
        super().__init__()

        self.bert = BertModel.from_pretrained('bert-base-uncased')

        self.FIXED_NR_OUTPUTS = 8  # FIXME: we may want to make this dynamic;
                                   # for now it is hard-coded to the mapping below.

        self.fc = nn.Linear(768, self.FIXED_NR_OUTPUTS).to(device)

        self.device = device

        self.index_to_tag = index_to_tag
        self.tag_to_index = tag_to_index

        self.mapping = {'<pad>': [0, 0, 0, 0, 0, 0, 1, 0],  # <pad>
                        'NA'   : [0, 0, 0, 0, 0, 1, 0, 0],  # NA
                        '2'    : [1, 1, 1, 1, 1, 0, 0, 0],  # prosody value 2
                        '0'    : [1, 1, 1, 0, 0, 0, 0, 0],  # prosody value 0
                        '1'    : [1, 1, 1, 1, 0, 0, 0, 0]}  # prosody value 1 
Developer: Helsinki-NLP | Project: prosody | Lines: 22 | Source: model.py
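The mapping encodes each tag as a cumulative 8-dimensional multi-hot target, so a per-dimension binary loss is a natural pairing (an assumption on our part about the training setup, not necessarily Helsinki-NLP's actual loop):

import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()
logits = torch.randn(2, 8)  # e.g. self.fc outputs for two tokens
targets = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0],   # prosody value '0'
                        [0, 0, 0, 0, 0, 1, 0, 0]],  # 'NA'
                       dtype=torch.float)
loss = criterion(logits, targets)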

Example 6: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, mode: str = 'bert-base-uncased'):
        """:class:`BertModule` constructor."""
        super().__init__()
        self.bert = BertModel.from_pretrained(mode) 
Developer: NTMC-Community | Project: MatchZoo-py | Lines: 6 | Source: bert_module.py

Example 7: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if large:
            self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)

        self.finetune = finetune 
Developer: nlpyang | Project: PreSumm | Lines: 10 | Source: model_builder.py
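The finetune flag suggests BERT is frozen during training when it is False; one sketch of a forward pass that honors it (close in spirit to, but not copied from, PreSumm; assumes torch is imported):

def forward(self, input_ids, token_type_ids, attention_mask):
    if self.finetune:
        top_vec = self.model(input_ids, token_type_ids=token_type_ids,
                             attention_mask=attention_mask)[0]
    else:
        self.model.eval()
        with torch.no_grad():  # frozen encoder: no gradients through BERT
            top_vec = self.model(input_ids, token_type_ids=token_type_ids,
                                 attention_mask=attention_mask)[0]
    return top_vec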

Example 8: __init__

# Required import: from pytorch_transformers import BertTokenizer
# (this baseline uses only BERT's tokenizer, together with torch.nn as nn)
def __init__(self, pretrained_model_name_for_tokenizer, max_vocabulary_size,
                 max_tokenization_length, embedding_dim, num_classes=1, num_recurrent_layers=1,
                 use_bidirectional=False, hidden_size=128, dropout_rate=0.10, use_gpu=False):
        super(SimpleRNN, self).__init__()
        self.num_recurrent_layers = num_recurrent_layers
        self.use_bidirectional = use_bidirectional
        self.hidden_size = hidden_size
        self.use_gpu = use_gpu

        # Configure tokenizer
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_for_tokenizer)
        self.tokenizer.max_len = max_tokenization_length

        # Define additional layers & utilities specific to the finetuned task
        # Embedding Layer
        self.embedding = nn.Embedding(num_embeddings=max_vocabulary_size,
                                      embedding_dim=embedding_dim)

        # Dropout to prevent overfitting
        self.dropout = nn.Dropout(p=dropout_rate)

        # Recurrent Layer
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_size,
                            num_layers=num_recurrent_layers,
                            bidirectional=use_bidirectional,
                            batch_first=True)

        # Dense Layer for Classification
        self.clf = nn.Linear(in_features=hidden_size*2 if use_bidirectional else hidden_size,
                             out_features=num_classes) 
Developer: uzaymacar | Project: comparatively-finetuning-bert | Lines: 33 | Source: baseline_models.py
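A hypothetical way to drive this baseline end to end (argument values are illustrative; 30522 is the bert-base-uncased vocabulary size):

import torch

model = SimpleRNN(pretrained_model_name_for_tokenizer='bert-base-uncased',
                  max_vocabulary_size=30522, max_tokenization_length=128,
                  embedding_dim=128)

tokens = model.tokenizer.tokenize("an unexpectedly good movie")
ids = torch.tensor([model.tokenizer.convert_tokens_to_ids(tokens)])  # (1, seq_len)

embedded = model.dropout(model.embedding(ids))
output, (h_n, c_n) = model.lstm(embedded)
logits = model.clf(h_n[-1])  # final hidden state of the last (only) layer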

Example 9: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, vocab, pretrained_model_name=None, trainable=False, unit="subword"):
        super(BertEmbedding, self).__init__(vocab)
        self.trainable = trainable

        self.pad_index = vocab.get_index(vocab.pad_token)
        self.sep_index = vocab.get_index(vocab.sep_token)

        if unit != "subword":
            raise NotImplementedError("BertEmbedding currently supports only the 'subword' unit.")

        self.bert_model = BertModel.from_pretrained(pretrained_model_name)  # BertModel with config 
Developer: naver | Project: claf | Lines: 13 | Source: bert_embedding.py
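One plausible way the trainable flag could then be honored (our sketch, not claf's code):

if not self.trainable:
    for param in self.bert_model.parameters():
        param.requires_grad = False  # keep the pretrained weights frozen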

Example 10: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, config, gpu_list, *args, **params):
        super(BertEncoder, self).__init__()
        self.bert = BertModel.from_pretrained(config.get("model", "bert_path")) 
Developer: haoxizhong | Project: pytorch-worker | Lines: 5 | Source: BasicBert.py
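The config.get("model", "bert_path") call follows configparser's (section, option) signature, so the corresponding config file would contain something like this (the path value is illustrative):

[model]
bert_path = /data/models/bert-base-uncased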

Example 11: __init__

# Required imports: from pytorch_transformers import BertForSequenceClassification, BertTokenizer
# (this snippet also uses os and torch.nn as nn)
def __init__(self, pretrain_path, max_length): 
        nn.Module.__init__(self)
        # self.bert = BertModel.from_pretrained(pretrain_path)
        self.bert = BertForSequenceClassification.from_pretrained(
                pretrain_path,
                num_labels=2)
        self.max_length = max_length
        self.tokenizer = BertTokenizer.from_pretrained(os.path.join(
            pretrain_path, 'bert_vocab.txt'))
        self.modelName = 'Bert' 
Developer: thunlp | Project: FewRel | Lines: 12 | Source: ssss.py
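A hypothetical tokenize helper consistent with the fields above (illustrative only; FewRel's actual preprocessing differs):

def tokenize(self, text):
    # Truncate to leave room for the special tokens, then pad to max_length.
    tokens = ['[CLS]'] + self.tokenizer.tokenize(text)[:self.max_length - 2] + ['[SEP]']
    ids = self.tokenizer.convert_tokens_to_ids(tokens)
    ids += [0] * (self.max_length - len(ids))  # 0 is [PAD] in BERT vocabularies
    return torch.tensor([ids])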


Note: The pytorch_transformers.BertModel.from_pretrained examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.