

Python BertModel.from_pretrained Method Code Examples

This article collects typical usage examples of the Python method pytorch_transformers.BertModel.from_pretrained. If you are unsure what BertModel.from_pretrained does or how to use it, the curated examples below should help; you can also explore other uses of pytorch_transformers.BertModel.


The following presents 11 code examples of BertModel.from_pretrained, ordered by popularity.
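Before turning to the examples, here is a minimal, self-contained sketch of the method itself; the model name and input sentence are arbitrary choices for illustration:

import torch
from pytorch_transformers import BertModel, BertTokenizer

# Download (or load from the local cache) the pretrained tokenizer and weights.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
model.eval()

# Encode one sentence and run a forward pass.
input_ids = torch.tensor([tokenizer.encode("Hello, BERT!", add_special_tokens=True)])
with torch.no_grad():
    outputs = model(input_ids)

sequence_output = outputs[0]  # (1, seq_len, 768): per-token hidden states
pooled_output = outputs[1]    # (1, 768): pooled [CLS] representation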

Example 1: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(
        self, token_makers, num_tags, ignore_tag_idx, pretrained_model_name=None, dropout=0.2
    ):

        super(BertForTokCls, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        self.ignore_tag_idx = ignore_tag_idx
        self.num_tags = num_tags

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, num_tags)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_tag_idx) 
Author: naver, Project: claf, Lines: 22, Source: bert.py
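For context, applying this token-classification head at training time would look roughly like the sketch below; the tensor names (input_ids, tag_ids) and the flattening step are assumptions, not claf's actual forward code:

outputs = self._model(input_ids)        # pytorch_transformers returns a tuple
token_states = outputs[0]               # (batch, seq_len, hidden_size)
logits = self.classifier(token_states)  # (batch, seq_len, num_tags)

# CrossEntropyLoss expects (N, C) logits; padded positions are skipped
# because the criterion was built with ignore_index=ignore_tag_idx.
loss = self.criterion(logits.view(-1, self.num_tags), tag_ids.view(-1))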

Example 2: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, token_makers, num_classes, pretrained_model_name=None, dropout=0.2):

        super(BertForSeqCls, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        self.num_classes = num_classes

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, num_classes)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.CrossEntropyLoss() 
Author: naver, Project: claf, Lines: 19, Source: bert.py
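Example 2 mirrors Example 1 but classifies the whole sequence rather than each token. A hedged sketch of the corresponding forward step (labels is an assumed tensor of class indices; this is not claf's literal code):

outputs = self._model(input_ids)
pooled = outputs[1]                    # (batch, hidden_size): pooled [CLS] vector
logits = self.classifier(pooled)       # (batch, num_classes)
loss = self.criterion(logits, labels)  # labels: (batch,) class indices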

Example 3: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, token_makers, tasks, pretrained_model_name=None, dropouts=None):
        super(BertForMultiTask, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters
        self.tasks = tasks

        assert len(tasks) == len(dropouts)

        self.curr_task_category = None
        self.curr_dataset = None

        self.shared_layers = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self._init_task_layers(tasks, dropouts)
        self._init_criterions(tasks) 
Author: naver, Project: claf, Lines: 18, Source: bert.py
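The helpers _init_task_layers and _init_criterions are not part of the excerpt. One plausible, purely illustrative implementation builds one head per task on top of the shared encoder; nn.ModuleList and the task["num_label"] key are assumptions, not claf's actual code:

def _init_task_layers(self, tasks, dropouts):
    # One dropout + linear head per task, sharing the BERT encoder underneath.
    hidden_size = self.shared_layers.config.hidden_size
    self.task_layers = nn.ModuleList([
        nn.Sequential(nn.Dropout(p), nn.Linear(hidden_size, task["num_label"]))
        for task, p in zip(tasks, dropouts)
    ])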

Example 4: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, token_makers, pretrained_model_name=None, dropout=0.2):

        super(BertForRegression, self).__init__(token_makers)

        self.use_pytorch_transformers = True  # for optimizer's model parameters

        NUM_CLASSES = 1

        self._model = BertModel.from_pretrained(
            pretrained_model_name, cache_dir=str(CachePath.ROOT)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, NUM_CLASSES)
        )
        self.classifier.apply(self._model.init_weights)

        self.criterion = nn.MSELoss() 
Author: naver, Project: claf, Lines: 19, Source: bert.py
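With NUM_CLASSES = 1 the head emits one score per example, and nn.MSELoss wants matching shapes, so the trailing dimension is typically squeezed away. A sketch (targets is an assumed tensor of gold scores; not claf's actual forward code):

outputs = self._model(input_ids)
scores = self.classifier(outputs[1]).squeeze(-1)  # (batch,) regression scores
loss = self.criterion(scores, targets.float())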

Example 5: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, device, config, index_to_tag, tag_to_index):
        super().__init__()

        self.bert = BertModel.from_pretrained('bert-base-uncased')

        self.FIXED_NR_OUTPUTS = 8 # FIXME: We may want to make this dynamic?
                                  # For now it is handcoded to the mapping below.

        self.fc = nn.Linear(768, self.FIXED_NR_OUTPUTS).to(device)

        self.device = device

        self.index_to_tag = index_to_tag
        self.tag_to_index = tag_to_index

        self.mapping = {'<pad>': [0, 0, 0, 0, 0, 0, 1, 0],  # <pad>
                        'NA'   : [0, 0, 0, 0, 0, 1, 0, 0],  # NA
                        '2'    : [1, 1, 1, 1, 1, 0, 0, 0],  # prosody value 2
                        '0'    : [1, 1, 1, 0, 0, 0, 0, 0],  # prosody value 0
                        '1'    : [1, 1, 1, 1, 0, 0, 0, 0]}  # prosody value 1 
Author: Helsinki-NLP, Project: prosody, Lines: 22, Source: model.py
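The mapping is a cumulative (ordinal) encoding of the ordered prosody values 0 < 1 < 2, with dedicated slots for NA and <pad>. Turning a tag sequence into training targets could then look like this sketch (the tags list is illustrative):

tags = ['0', '1', '2', 'NA', '<pad>']
targets = torch.tensor([self.mapping[t] for t in tags], dtype=torch.float)
# targets.shape == (5, 8): one 8-dim multi-label vector per tag, matching
# self.fc's output size, e.g. for a loss such as nn.BCEWithLogitsLoss.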

Example 6: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, mode: str = 'bert-base-uncased'):
        """:class:`BertModule` constructor."""
        super().__init__()
        self.bert = BertModel.from_pretrained(mode) 
Author: NTMC-Community, Project: MatchZoo-py, Lines: 6, Source: bert_module.py
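Since the wrapper stores BertModel directly, calling it yields pytorch_transformers' usual output tuple. A minimal usage sketch (the token ids spell "[CLS] hello [SEP]" in the bert-base-uncased vocabulary):

module = BertModule('bert-base-uncased')
input_ids = torch.tensor([[101, 7592, 102]])
sequence_output, pooled_output = module.bert(input_ids)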

Example 7: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, large, temp_dir, finetune=False):
        super(Bert, self).__init__()
        if(large):
            self.model = BertModel.from_pretrained('bert-large-uncased', cache_dir=temp_dir)
        else:
            self.model = BertModel.from_pretrained('bert-base-uncased', cache_dir=temp_dir)

        self.finetune = finetune 
Author: nlpyang, Project: PreSumm, Lines: 10, Source: model_builder.py
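The finetune flag presumably decides whether gradients flow through BERT at training time. A common pattern, sketched below rather than copied from PreSumm (segment ids are omitted for brevity):

def forward(self, x, mask):
    if self.finetune:
        top_vec, _ = self.model(x, attention_mask=mask)
    else:
        self.eval()
        with torch.no_grad():  # frozen BERT: use it as a fixed feature extractor
            top_vec, _ = self.model(x, attention_mask=mask)
    return top_vec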

Example 8: __init__

# Required import: from pytorch_transformers import BertTokenizer
def __init__(self, pretrained_model_name_for_tokenizer, max_vocabulary_size,
                 max_tokenization_length, embedding_dim, num_classes=1, num_recurrent_layers=1,
                 use_bidirectional=False, hidden_size=128, dropout_rate=0.10, use_gpu=False):
        super(SimpleRNN, self).__init__()
        self.num_recurrent_layers = num_recurrent_layers
        self.use_bidirectional = use_bidirectional
        self.hidden_size = hidden_size
        self.use_gpu = use_gpu

        # Configure tokenizer
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_model_name_for_tokenizer)
        self.tokenizer.max_len = max_tokenization_length

        # Define additional layers & utilities specific to the finetuned task
        # Embedding Layer
        self.embedding = nn.Embedding(num_embeddings=max_vocabulary_size,
                                      embedding_dim=embedding_dim)

        # Dropout to prevent overfitting
        self.dropout = nn.Dropout(p=dropout_rate)

        # Recurrent Layer
        self.lstm = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_size,
                            num_layers=num_recurrent_layers,
                            bidirectional=use_bidirectional,
                            batch_first=True)

        # Dense Layer for Classification
        self.clf = nn.Linear(in_features=hidden_size*2 if use_bidirectional else hidden_size,
                             out_features=num_classes) 
Author: uzaymacar, Project: comparatively-finetuning-bert, Lines: 33, Source: baseline_models.py
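Note that this baseline borrows only BERT's tokenizer; the embedding, LSTM, and classifier are trained from scratch. Feeding it a sentence would look roughly like the sketch below (classifying from the last timestep is an assumption, not necessarily the project's forward code):

ids = self.tokenizer.encode("a simple baseline example")
input_ids = torch.tensor([ids[:self.tokenizer.max_len]])  # (1, seq_len)
embedded = self.dropout(self.embedding(input_ids))
rnn_out, _ = self.lstm(embedded)      # (1, seq_len, hidden_size[*2 if bidirectional])
logits = self.clf(rnn_out[:, -1, :])  # (1, num_classes)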

Example 9: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, vocab, pretrained_model_name=None, trainable=False, unit="subword"):
        super(BertEmbedding, self).__init__(vocab)
        self.trainable = trainable

        self.pad_index = vocab.get_index(vocab.pad_token)
        self.sep_index = vocab.get_index(vocab.sep_token)

        if unit != "subword":
            raise NotImplementedError("BertEmbedding is only available 'subword' unit, right now.")

        self.bert_model = BertModel.from_pretrained(pretrained_model_name)  # BertModel with config 
Author: naver, Project: claf, Lines: 13, Source: bert_embedding.py
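When trainable is False, the BERT parameters presumably should not receive gradient updates; one common way to enforce that, shown as an assumption rather than claf's actual mechanism:

if not self.trainable:
    for param in self.bert_model.parameters():
        param.requires_grad = False  # freeze the pretrained weights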

Example 10: __init__

# Required import: from pytorch_transformers import BertModel
def __init__(self, config, gpu_list, *args, **params):
        super(BertEncoder, self).__init__()
        self.bert = BertModel.from_pretrained(config.get("model", "bert_path")) 
Author: haoxizhong, Project: pytorch-worker, Lines: 5, Source: BasicBert.py
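Here config behaves like a standard configparser object, so the checkpoint location comes from an INI-style file. An illustrative setup (the section/key values are invented for the example):

import configparser

config = configparser.ConfigParser()
config.read_string("[model]\nbert_path = bert-base-uncased\n")
encoder = BertEncoder(config, gpu_list=[])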

Example 11: __init__

# Required imports: from pytorch_transformers import BertForSequenceClassification, BertTokenizer
def __init__(self, pretrain_path, max_length): 
        nn.Module.__init__(self)
        # self.bert = BertModel.from_pretrained(pretrain_path)
        self.bert = BertForSequenceClassification.from_pretrained(
                pretrain_path,
                num_labels=2)
        self.max_length = max_length
        self.tokenizer = BertTokenizer.from_pretrained(os.path.join(
            pretrain_path, 'bert_vocab.txt'))
        self.modelName = 'Bert' 
Author: thunlp, Project: FewRel, Lines: 12, Source: ssss.py
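Unlike the other examples, this one wraps BertForSequenceClassification, whose forward pass already includes the 2-way classification head. A hedged usage sketch (the input sentence is arbitrary):

ids = self.tokenizer.encode("an example sentence", add_special_tokens=True)
logits = self.bert(torch.tensor([ids[:self.max_length]]))[0]  # (1, 2) logits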


Note: The pytorch_transformers.BertModel.from_pretrained examples in this article were collected by 純淨天空 from open-source projects and documentation platforms such as GitHub and MSDocs. Copyright in each snippet remains with its original author; consult the corresponding project's license before using or redistributing the code.