

Python transformers.BertConfig Code Examples

This article collects and summarizes typical usage examples of transformers.BertConfig (the BERT configuration class) from Python code. If you are unsure how to use transformers.BertConfig, or what it looks like in practice, the curated code examples below may help. You can also explore further usage of the transformers package it belongs to.


The following presents 12 code examples of transformers.BertConfig, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
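Quick reference: before the examples, here is a minimal sketch of constructing a BertConfig by hand. The keyword values shown are the library defaults for bert-base, so a bare BertConfig() produces the same configuration; the print call is only illustrative.

from transformers import BertConfig

config = BertConfig(
    vocab_size=30522,        # WordPiece vocabulary size (default)
    hidden_size=768,         # encoder hidden dimension (default)
    num_hidden_layers=12,    # number of Transformer layers (default)
    num_attention_heads=12,  # attention heads per layer (default)
    intermediate_size=3072,  # feed-forward inner dimension (default)
)
print(config.hidden_size)    # -> 768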

Example 1: main

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def main():
    # Read the BERT-Large hyperparameter file exported with the TF checkpoint.
    with open("build/data/bert_tf_v1_1_large_fp32_384_v2/bert_config.json") as f:
        config_json = json.load(f)

    config = BertConfig(
        attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"],
        hidden_act=config_json["hidden_act"],
        hidden_dropout_prob=config_json["hidden_dropout_prob"],
        hidden_size=config_json["hidden_size"],
        initializer_range=config_json["initializer_range"],
        intermediate_size=config_json["intermediate_size"],
        max_position_embeddings=config_json["max_position_embeddings"],
        num_attention_heads=config_json["num_attention_heads"],
        num_hidden_layers=config_json["num_hidden_layers"],
        type_vocab_size=config_json["type_vocab_size"],
        vocab_size=config_json["vocab_size"])

    # load_from_tf and save_to_onnx are helpers defined elsewhere in
    # bert_tf_to_pytorch.py: convert the TF checkpoint to PyTorch weights,
    # save a state dict, then export the model to ONNX.
    model = load_from_tf(config, "build/data/bert_tf_v1_1_large_fp32_384_v2/model.ckpt-5474")
    torch.save(model.state_dict(), "build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch")
    save_to_onnx(model)
Developer: mlperf, Project: inference, Lines: 22, Source: bert_tf_to_pytorch.py
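A side note on Example 1: copying every JSON field into keyword arguments can usually be replaced by a single call to BertConfig.from_json_file, which BertConfig inherits from PretrainedConfig. A sketch, assuming the same bert_config.json layout as above:

from transformers import BertConfig

# Equivalent one-liner: the JSON fields are mapped to config attributes
# automatically; the path is taken from Example 1.
config = BertConfig.from_json_file(
    "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_config.json")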

Example 2: __init__

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def __init__(self):
    print("Loading BERT configs...")
    with open("bert_config.json") as f:
        config_json = json.load(f)

    config = BertConfig(
        attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"],
        hidden_act=config_json["hidden_act"],
        hidden_dropout_prob=config_json["hidden_dropout_prob"],
        hidden_size=config_json["hidden_size"],
        initializer_range=config_json["initializer_range"],
        intermediate_size=config_json["intermediate_size"],
        max_position_embeddings=config_json["max_position_embeddings"],
        num_attention_heads=config_json["num_attention_heads"],
        num_hidden_layers=config_json["num_hidden_layers"],
        type_vocab_size=config_json["type_vocab_size"],
        vocab_size=config_json["vocab_size"])

    print("Loading PyTorch model...")
    self.model = BertForQuestionAnswering(config)
    self.model.eval()
    self.model.cuda()
    self.model.load_state_dict(torch.load("build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch"))

    print("Constructing SUT...")
    # lg is the mlperf loadgen module; the SUT (system under test) wires the
    # query callbacks into loadgen.
    self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
    print("Finished constructing SUT.")

    self.qsl = get_squad_QSL()
Developer: mlperf, Project: inference, Lines: 31, Source: pytorch_SUT.py

Example 3: prepare_config_and_inputs

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def prepare_config_and_inputs(self):
    # ids_tensor is a test helper that returns a random integer tensor of
    # the given shape, with values below the supplied vocabulary size.
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = BertConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
    )

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
Developer: bhoov, Project: exbert, Lines: 36, Source: test_modeling_tf_bert.py

Example 4: setUp

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def setUp(self):
    self.model_tester = TFBertModelTest.TFBertModelTester(self)
    self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
Developer: bhoov, Project: exbert, Lines: 5, Source: test_modeling_tf_bert.py

Example 5: test_TFBertModel

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertModel(self):
    from transformers import BertConfig, TFBertModel
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    # Build a randomly initialized model from the default config, convert it
    # with keras2onnx, and check that ONNX Runtime reproduces the predictions.
    config = BertConfig()
    model = TFBertModel(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
Developer: onnx, Project: keras-onnx, Lines: 16, Source: test_transformers.py
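Examples 6 through 10 below repeat this pattern with different task heads (pre-training, masked LM, next-sentence prediction, sequence classification, question answering). As a complement, a sketch of persisting a converted model to disk, assuming keras2onnx and onnx are installed; dummy_inputs is the placeholder input dict that transformers TF models expose for building the graph, and the output filename is an illustrative choice:

import onnx
import keras2onnx
from transformers import BertConfig, TFBertModel

config = BertConfig()                         # default bert-base configuration
model = TFBertModel(config)                   # randomly initialized weights
model(model.dummy_inputs)                     # build the Keras graph before converting
onnx_model = keras2onnx.convert_keras(model, model.name)
onnx.save(onnx_model, "tf_bert_model.onnx")   # illustrative filename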

Example 6: test_TFBertForPreTraining

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForPreTraining(self):
    from transformers import BertConfig, TFBertForPreTraining
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForPreTraining(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
Developer: onnx, Project: keras-onnx, Lines: 16, Source: test_transformers.py

Example 7: test_TFBertForMaskedLM

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForMaskedLM(self):
    from transformers import BertConfig, TFBertForMaskedLM
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForMaskedLM(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
Developer: onnx, Project: keras-onnx, Lines: 16, Source: test_transformers.py

Example 8: test_TFBertForNextSentencePrediction

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForNextSentencePrediction(self):
    from transformers import BertConfig, TFBertForNextSentencePrediction
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForNextSentencePrediction(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py

Example 9: test_TFBertForSequenceClassification

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForSequenceClassification(self):
    from transformers import BertConfig, TFBertForSequenceClassification
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForSequenceClassification(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py

Example 10: test_TFBertForQuestionAnswering

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForQuestionAnswering(self):
    from transformers import BertConfig, TFBertForQuestionAnswering
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForQuestionAnswering(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py

Example 11: make_model

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
               d_model=512, d_ff=2048, h=8, dropout=0.1):
    "Helper: Construct a model from hyperparameters."
    # The encoder never performs an embedding lookup (vocab_size=1,
    # max_position_embeddings=1); it consumes precomputed feature vectors.
    enc_config = BertConfig(vocab_size=1,
                            hidden_size=d_model,
                            num_hidden_layers=N_enc,
                            num_attention_heads=h,
                            intermediate_size=d_ff,
                            hidden_dropout_prob=dropout,
                            attention_probs_dropout_prob=dropout,
                            max_position_embeddings=1,
                            type_vocab_size=1)
    dec_config = BertConfig(vocab_size=tgt_vocab,
                            hidden_size=d_model,
                            num_hidden_layers=N_dec,
                            num_attention_heads=h,
                            intermediate_size=d_ff,
                            hidden_dropout_prob=dropout,
                            attention_probs_dropout_prob=dropout,
                            max_position_embeddings=17,
                            type_vocab_size=1,
                            is_decoder=True)
    encoder = BertModel(enc_config)

    # Replace the encoder's embedding layer with a passthrough so that the
    # tensors supplied via inputs_embeds are used directly.
    def return_embeds(*args, **kwargs):
        return kwargs['inputs_embeds']
    del encoder.embeddings
    encoder.embeddings = return_embeds

    decoder = BertModel(dec_config)
    model = EncoderDecoder(
        encoder,
        decoder,
        Generator(d_model, tgt_vocab))
    return model
Developer: ruotianluo, Project: self-critical.pytorch, Lines: 34, Source: BertCapModel.py

Example 12: __init__

# Required module: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def __init__(self, encoder, args, model_class, pretrained_model_name, max_pos=512, pretrained_config=None, temp_dir="./"):
    super(BertSumExt, self).__init__()
    self.loss = torch.nn.BCELoss(reduction='none')
    #self.device = device
    self.transformer = Transformer(temp_dir, model_class, pretrained_model_name, pretrained_config)
    # Choose the sentence-scoring head that sits on top of the transformer.
    if encoder == 'classifier':
        self.encoder = Classifier(self.transformer.model.config.hidden_size)
    elif encoder == 'transformer':
        self.encoder = ExtTransformerEncoder(self.transformer.model.config.hidden_size, args.ff_size, args.heads,
                                             args.dropout, args.inter_layers)
    elif encoder == 'rnn':
        self.encoder = RNNEncoder(bidirectional=True, num_layers=1,
                                  input_size=self.transformer.model.config.hidden_size, hidden_size=args.rnn_size,
                                  dropout=args.dropout)
    elif encoder == 'baseline':
        # 'baseline' swaps the pretrained transformer for a small BERT built
        # from scratch with a fresh BertConfig.
        bert_config = BertConfig(self.transformer.model.config.vocab_size, hidden_size=args.hidden_size,
                                 num_hidden_layers=6, num_attention_heads=8, intermediate_size=args.ff_size)
        self.transformer.model = BertModel(bert_config)
        self.encoder = Classifier(self.transformer.model.config.hidden_size)

    self.max_pos = max_pos
    # BERT ships with 512 learned position embeddings; to handle longer
    # inputs, copy them over and repeat the last row for the extra slots.
    if max_pos > 512:
        my_pos_embeddings = nn.Embedding(self.max_pos, self.transformer.model.config.hidden_size)
        my_pos_embeddings.weight.data[:512] = self.transformer.model.embeddings.position_embeddings.weight.data
        my_pos_embeddings.weight.data[512:] = self.transformer.model.embeddings.position_embeddings.weight.data[-1][None, :].repeat(self.max_pos - 512, 1)
        self.transformer.model.embeddings.position_embeddings = my_pos_embeddings

    if args.param_init != 0.0:
        for p in self.encoder.parameters():
            p.data.uniform_(-args.param_init, args.param_init)
    if args.param_init_glorot:
        for p in self.encoder.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)

    #self.to(device)
Developer: microsoft, Project: nlp-recipes, Lines: 38, Source: model_builder.py
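The position-embedding surgery in Example 12 is a reusable trick: BERT ships with 512 learned positions, and longer inputs can be accommodated by copying the learned rows and tiling the last one. A standalone sketch of the same idea; max_pos is an illustrative choice, and depending on the transformers version you may also need to update config.max_position_embeddings (and any registered position_ids buffer) before running longer sequences:

import torch.nn as nn
from transformers import BertModel

max_pos = 1024                                    # illustrative target length
model = BertModel.from_pretrained("bert-base-uncased")
old = model.embeddings.position_embeddings        # nn.Embedding(512, hidden)
new = nn.Embedding(max_pos, old.weight.size(1))
new.weight.data[:512] = old.weight.data           # keep the learned positions
new.weight.data[512:] = old.weight.data[-1][None, :].repeat(max_pos - 512, 1)
model.embeddings.position_embeddings = new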


Note: The transformers.BertConfig examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Please do not repost without permission.