

Python BertModel.from_pretrained Method Code Examples

This article collects typical usage examples of the Python method pytorch_pretrained_bert.modeling.BertModel.from_pretrained. If you are wondering how exactly BertModel.from_pretrained is called in Python, what it is used for, or where to find examples of it in practice, the curated code examples below may help. You can also explore further usage examples of the class this method belongs to, pytorch_pretrained_bert.modeling.BertModel.


A total of 11 code examples of BertModel.from_pretrained are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
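For orientation before the examples, here is a minimal sketch of the typical calling pattern; the model name "bert-base-uncased" and the sample sentence are placeholders chosen for illustration. from_pretrained downloads and caches the pretrained weights (or loads them from a local path), and the returned model yields a list of per-layer hidden states plus a pooled [CLS] vector.

import torch
from pytorch_pretrained_bert import BertTokenizer
from pytorch_pretrained_bert.modeling import BertModel

# Load the pretrained tokenizer and model by name; weights are downloaded and cached.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
model.eval()

# Tokenize a sentence and convert it to input ids.
tokens = ["[CLS]"] + tokenizer.tokenize("Hello, world!") + ["[SEP]"]
input_ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])

with torch.no_grad():
    # encoded_layers is a list of hidden states (one tensor per layer);
    # pooled_output is the transformed [CLS] representation.
    encoded_layers, pooled_output = model(input_ids)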

Example 1: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(self, pretrained_model: str,
                 requires_grad: bool = False,
                 dropout: float = 0.1,
                 layer_dropout: float = 0.1,
                 combine_layers: str = "mix") -> None:
        model = BertModel.from_pretrained(pretrained_model)

        for param in model.parameters():
            param.requires_grad = requires_grad

        super().__init__(bert_model=model,
                         layer_dropout=layer_dropout,
                         combine_layers=combine_layers)

        self.model = model
        self.dropout = dropout
        self.set_dropout(dropout) 
Developer ID: Hyperparticle, Project: udify, Lines: 19, Source: bert_pretrained.py

Example 2: main

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def main():
    torch.manual_seed(args.seed)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available()
    args.use_gpu = use_gpu

    if use_gpu:
        print("Currently using GPU {}".format(args.gpu_devices))
        cudnn.benchmark = True
        torch.cuda.manual_seed_all(args.seed)
    else:
        print("Currently using CPU (GPU is highly recommended)")

    tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
    bert_model = BertModel.from_pretrained("bert-base-chinese")

    if use_gpu:
        bert_model = bert_model.cuda()

    processor = Preprocess(args, tokenizer, bert_model)
    processor.do_preprocess() 
Developer ID: tracy-talent, Project: curriculum, Lines: 23, Source: preprocess_embedding.py

Example 3: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(self, hps):
        super(BertEncoder, self).__init__()

        from pytorch_pretrained_bert.modeling import BertModel

        self._hps = hps
        self.sent_max_len = hps.sent_max_len
        self._cuda = hps.cuda

        embed_size = hps.word_emb_dim
        sent_max_len = hps.sent_max_len

        input_channels = 1
        out_channels = hps.output_channel
        min_kernel_size = hps.min_kernel_size
        max_kernel_size = hps.max_kernel_size
        width = embed_size

        # word embedding
        self._bert = BertModel.from_pretrained("/remote-home/dqwang/BERT/pre-train/uncased_L-24_H-1024_A-16")
        self._bert.eval()
        for p in self._bert.parameters():
            p.requires_grad = False

        self.word_embedding_proj = nn.Linear(4096, embed_size)

        # position embedding
        self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)

        # cnn
        self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
        logger.info("[INFO] Initing W for CNN.......")
        for conv in self.convs:
            init_weight_value = 6.0
            init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
            fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
            std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out)) 
Developer ID: fastnlp, Project: fastNLP, Lines: 39, Source: Encoder.py

Example 4: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(self, config):
        super(bc_RNN, self).__init__()

        self.config = config
        self.encoder = BertModel.from_pretrained("bert-base-uncased")

        context_input_size = (config.num_layers
                              * config.encoder_hidden_size)

        self.context_encoder = layer.ContextRNN(context_input_size,
                                                 config.context_size,
                                                 config.rnn,
                                                 config.num_layers,
                                                 config.dropout)

        self.context2decoder = layer.FeedForward(config.context_size,
                                                  config.num_layers * config.context_size,
                                                  num_layers=1,
                                                  activation=config.activation,
                                                  isActivation=True)
        
        self.decoder2output = layer.FeedForward(config.num_layers * config.context_size,
                                                 config.num_classes,
                                                 num_layers=1,
                                                 isActivation=False)
        self.dropoutLayer = nn.Dropout(p=config.dropout) 
Developer ID: declare-lab, Project: conv-emotion, Lines: 28, Source: models.py

Example 5: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(self, args, use_gpu):
        super(BaseModel, self).__init__()
        self.num_labels = 2
        self.use_gpu = use_gpu

        self.bert_model = BertModel.from_pretrained("bert-base-multilingual-cased")
        self.dropout = nn.Dropout(args.dropout_prob)
        self.classifier = nn.Linear(768, self.num_labels)
        self.init_weight() 
Developer ID: tracy-talent, Project: curriculum, Lines: 11, Source: model.py

Example 6: load

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def load(args):
  print('loading %s model'%args.bert_model)
  device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
  tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True, cache_dir=args.cache_dir)
  model = BertModel.from_pretrained(args.bert_model, cache_dir=args.cache_dir)
  model.to(device)
  if args.num_gpus > 1:
    model = torch.nn.DataParallel(model)
  model.eval()
  return model, tokenizer, device 
Developer ID: ganeshjawahar, Project: interpret_bert, Lines: 12, Source: extract_features.py

Example 7: load

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def load(args):
  print('loading %s model'%args.bert_model)
  device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
  tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True, cache_dir=args.cache_dir)
  model = BertModel.from_pretrained(args.bert_model, cache_dir=args.cache_dir)
  model.to(device)
  if args.num_gpus > 1:
    model = torch.nn.DataParallel(model)
  if args.untrained_bert:
    model.apply(init_weights)
  model.eval()
  return model, tokenizer, device 
Developer ID: ganeshjawahar, Project: interpret_bert, Lines: 14, Source: extract_features.py

Example 8: load_bert

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def load_bert(args):
  # load bert tokenizer and model
  tokenizer = BertTokenizer.from_pretrained(args.bert_model, 
              do_lower_case=True,
              cache_dir=args.cache_dir)
  pretrained_model = BertModel.from_pretrained(args.bert_model, 
              cache_dir=args.cache_dir)
  return tokenizer, pretrained_model

# role scheme generator 
Developer ID: ganeshjawahar, Project: interpret_bert, Lines: 12, Source: approx.py

Example 9: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(self, model='bert-large-uncased', use_cuda=True):
        self.model = model
        
        logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s', 
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
        logger = logging.getLogger(__name__)

        self.args = args = AttrDict({
            'bert_model': self.model,
            'do_lower_case': True,
            'layers': "-1,-2,-3,-4",
            'max_seq_length': 512,
            'batch_size': 2,
            'local_rank': -1,
            'no_cuda': not use_cuda
        })
        
        if args.local_rank == -1 or args.no_cuda:
            device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
            n_gpu = torch.cuda.device_count()
        else:
            device = torch.device("cuda", args.local_rank)
            n_gpu = 1
            # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
            torch.distributed.init_process_group(backend='nccl')
        
        logger.info("device: {} n_gpu: {} distributed training: {}".format(device, n_gpu, bool(args.local_rank != -1)))

        print('loading from model')
        model = BertModel.from_pretrained('results/bert_finetuned/lm/', cache_dir='results/bert_finetuned/lm/')
        print('loaded model')
        model.to(device)
        
        if args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                              output_device=args.local_rank)
        elif n_gpu > 1:
            model = torch.nn.DataParallel(model)
        
        model.eval()
        
        self.device = device
        self.model = model 
Developer ID: sattree, Project: gap, Lines: 46, Source: bert_features.py

Example 10: transform

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def transform(self, X):
        tokenizer = BertTokenizer.from_pretrained(self.args.bert_model, do_lower_case=self.args.do_lower_case, cache_dir='tmp/')

        examples = []

        for idx, row in X.iterrows():
            examples.append(InputExample(unique_id=idx, text_a=row.text, text_b=None))

        features = convert_examples_to_features(
            examples=examples, seq_length=self.args.max_seq_length, tokenizer=tokenizer)
        
        unique_id_to_feature = {}
        for feature in features:
            unique_id_to_feature[feature.unique_id] = feature

        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index)
        if self.args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
        
        layer_indexes = [int(x) for x in self.args.layers.split(",")]

        output = []
        for input_ids, input_mask, example_indices in tqdm(eval_dataloader):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)

            all_encoder_layers, _ = self.model(input_ids, token_type_ids=None, attention_mask=input_mask)
            all_encoder_layers = all_encoder_layers

            for b, example_index in enumerate(example_indices):
                feature = features[example_index.item()]
                unique_id = int(feature.unique_id)
                tokens = []
                layers = [[] for _ in layer_indexes]
                all_out_features = []
                for (i, token) in enumerate(feature.tokens):
                    for (j, layer_index) in enumerate(layer_indexes):
                        layer_output = all_encoder_layers[int(layer_index)].detach().cpu().numpy()
                        layer_output = layer_output[b]
                        layers[j].append([round(x.item(), 6) for x in layer_output[i]])
                    tokens.append(token)
                output.append([tokens, *layers])
                
        output = pd.DataFrame(output, columns=['tokens', *['layer_{}'.format(idx) for idx in layer_indexes]])
        res = []
        for idx, row in X.iterrows():
            res.append(self.get_sample_props(output.loc[idx], layer_indexes, **row)[1:])
        
        res = pd.DataFrame(res, columns=['tokens', 'pronoun_offset_token',
                                                'a_offset_token', 'b_offset_token', 'a_span',
                                                'b_span', 'pronoun_token', 'a_tokens', 'b_tokens', 'bert', 'cls'])
        
        cols = set(X.columns).difference(res.columns)
        return {'X': pd.concat([X[cols], res], axis=1)} 
Developer ID: sattree, Project: gap, Lines: 63, Source: bert_features.py

Example 11: __init__

# Required module: from pytorch_pretrained_bert.modeling import BertModel [as alias]
# Or: from pytorch_pretrained_bert.modeling.BertModel import from_pretrained [as alias]
def __init__(
        self,
        bert_model=None,
        tokenizer=None,
        language=Language.ENGLISH,
        num_gpus=None,
        cache_dir=".",
        to_lower=True,
        max_len=512,
        layer_index=-1,
        pooling_strategy=PoolingStrategy.MEAN,
    ):
        """Initialize the encoder's underlying model and tokenizer

        Args:
            bert_model: BERT model to use for encoding.
                Defaults to pretrained BertModel.
            tokenizer: Tokenizer to use for preprocessing.
                Defaults to pretrained BERT tokenizer.
            language: The pretrained model's language. Defaults to Language.ENGLISH.
            num_gpus: The number of gpus to use. Defaults to None, which forces all
                available GPUs to be used.
            cache_dir: Location of BERT's cache directory. Defaults to "."
            to_lower: True to lowercase before tokenization. Defaults to True.
            max_len: Maximum number of tokens.
            layer_index: The layer from which to extract features.
                         Defaults to the last layer; can also be a list of integers
                         for experimentation.
            pooling_strategy: Pooling strategy to aggregate token embeddings into
                sentence embedding.
        """
        self.model = (
            bert_model.model.bert
            if bert_model
            else BertModel.from_pretrained(language, cache_dir=cache_dir)
        )
        self.tokenizer = (
            tokenizer
            if tokenizer
            else Tokenizer(language, to_lower=to_lower, cache_dir=cache_dir)
        )
        self.num_gpus = num_gpus
        self.max_len = max_len
        self.layer_index = layer_index
        self.pooling_strategy = pooling_strategy
        self.has_cuda = self.cuda 
Developer ID: microsoft, Project: nlp-recipes, Lines: 48, Source: sequence_encoding.py


Note: The pytorch_pretrained_bert.modeling.BertModel.from_pretrained examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.