This article collects typical usage examples of Python's transformers.BertConfig. If you have been wondering what transformers.BertConfig does, how to use it, or what working code looks like, the curated examples below should help. You can also explore further usage examples from the containing module, transformers.
The following presents 12 code examples of transformers.BertConfig, ordered by popularity by default.
Example 1: main
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def main():
    with open("build/data/bert_tf_v1_1_large_fp32_384_v2/bert_config.json") as f:
        config_json = json.load(f)

    config = BertConfig(
        attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"],
        hidden_act=config_json["hidden_act"],
        hidden_dropout_prob=config_json["hidden_dropout_prob"],
        hidden_size=config_json["hidden_size"],
        initializer_range=config_json["initializer_range"],
        intermediate_size=config_json["intermediate_size"],
        max_position_embeddings=config_json["max_position_embeddings"],
        num_attention_heads=config_json["num_attention_heads"],
        num_hidden_layers=config_json["num_hidden_layers"],
        type_vocab_size=config_json["type_vocab_size"],
        vocab_size=config_json["vocab_size"])

    # load_from_tf and save_to_onnx are helpers defined elsewhere in the source file.
    model = load_from_tf(config, "build/data/bert_tf_v1_1_large_fp32_384_v2/model.ckpt-5474")
    torch.save(model.state_dict(), "build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch")
    save_to_onnx(model)
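The field-by-field construction above can be collapsed: transformers config classes provide a from_json_file classmethod that parses the same JSON directly. A minimal equivalent sketch, using the checkpoint path from the example:

from transformers import BertConfig

# Parse the TF-style bert_config.json straight into a BertConfig; keys present
# in the file override the defaults, and missing keys keep their default values.
config = BertConfig.from_json_file(
    "build/data/bert_tf_v1_1_large_fp32_384_v2/bert_config.json")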
Example 2: __init__
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def __init__(self):
    print("Loading BERT configs...")
    with open("bert_config.json") as f:
        config_json = json.load(f)

    config = BertConfig(
        attention_probs_dropout_prob=config_json["attention_probs_dropout_prob"],
        hidden_act=config_json["hidden_act"],
        hidden_dropout_prob=config_json["hidden_dropout_prob"],
        hidden_size=config_json["hidden_size"],
        initializer_range=config_json["initializer_range"],
        intermediate_size=config_json["intermediate_size"],
        max_position_embeddings=config_json["max_position_embeddings"],
        num_attention_heads=config_json["num_attention_heads"],
        num_hidden_layers=config_json["num_hidden_layers"],
        type_vocab_size=config_json["type_vocab_size"],
        vocab_size=config_json["vocab_size"])

    print("Loading PyTorch model...")
    self.model = BertForQuestionAnswering(config)
    self.model.eval()
    self.model.cuda()
    self.model.load_state_dict(torch.load("build/data/bert_tf_v1_1_large_fp32_384_v2/model.pytorch"))

    print("Constructing SUT...")
    # lg is the MLPerf LoadGen module; issue_queries, flush_queries and
    # process_latencies are callbacks defined on this class.
    self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
    print("Finished constructing SUT.")

    self.qsl = get_squad_QSL()
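Once the weights are in place, BertForQuestionAnswering answers a question by scoring every token as a potential start or end of the answer span. A minimal inference sketch using the standard transformers API on a 4.x release (earlier versions return tuples rather than objects with start_logits); the pretrained checkpoint name is illustrative and not part of the benchmark code:

import torch
from transformers import BertTokenizer, BertForQuestionAnswering

name = "bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = BertTokenizer.from_pretrained(name)
model = BertForQuestionAnswering.from_pretrained(name)
model.eval()

question = "What does BERT stand for?"
context = "BERT stands for Bidirectional Encoder Representations from Transformers."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Decode the highest-scoring start/end span back into text.
start = int(outputs.start_logits.argmax())
end = int(outputs.end_logits.argmax())
answer = tokenizer.decode(inputs["input_ids"][0][start:end + 1])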
Example 3: prepare_config_and_inputs
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = BertConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
    )

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
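Note that ids_tensor is not part of the public transformers API; it is a small helper from the library's test utilities. A simplified sketch of what it does (the real helper additionally accepts an optional random generator and a name):

import torch

def ids_tensor(shape, vocab_size):
    # Draw a random integer tensor with values in [0, vocab_size).
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)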
Example 4: setUp
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def setUp(self):
    self.model_tester = TFBertModelTest.TFBertModelTester(self)
    self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
Example 5: test_TFBertModel
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertModel(self):
    from transformers import BertConfig, TFBertModel
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertModel(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions,
                         self.model_files, rtol=1.e-2, atol=1.e-4))
Example 6: test_TFBertForPreTraining
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForPreTraining(self):
    from transformers import BertConfig, TFBertForPreTraining
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForPreTraining(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions,
                         self.model_files, rtol=1.e-2, atol=1.e-4))
Example 7: test_TFBertForMaskedLM
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForMaskedLM(self):
    from transformers import BertConfig, TFBertForMaskedLM
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForMaskedLM(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions,
                         self.model_files, rtol=1.e-2, atol=1.e-4))
Example 8: test_TFBertForNextSentencePrediction
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForNextSentencePrediction(self):
    from transformers import BertConfig, TFBertForNextSentencePrediction
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForNextSentencePrediction(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Example 9: test_TFBertForSequenceClassification
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForSequenceClassification(self):
    from transformers import BertConfig, TFBertForSequenceClassification
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForSequenceClassification(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Example 10: test_TFBertForQuestionAnswering
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def test_TFBertForQuestionAnswering(self):
    from transformers import BertConfig, TFBertForQuestionAnswering
    keras.backend.clear_session()
    # pretrained_weights = 'bert-base-uncased'
    tokenizer_file = 'bert_bert-base-uncased.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = BertConfig()
    model = TFBertForQuestionAnswering(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
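Examples 5 through 10 all follow one recipe: instantiate a randomly initialized TF BERT head from a default BertConfig, convert it with keras2onnx, and assert that ONNX Runtime reproduces the Keras outputs within tolerance. Below is a self-contained sketch of that round trip for TFBertModel. It assumes keras2onnx and onnxruntime are installed, downloads a tokenizer instead of using the pickled fixture, and assumes the ONNX graph inputs keep the tokenizer's key names, which held for the keras2onnx/transformers versions these tests targeted:

import numpy as np
import keras2onnx
import onnxruntime
from transformers import BertConfig, BertTokenizer, TFBertModel

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = TFBertModel(BertConfig())  # random weights suffice for a numeric comparison

inputs = tokenizer("convert me to ONNX", return_tensors="tf")
keras_out = model(dict(inputs))[0].numpy()  # last hidden state

onnx_model = keras2onnx.convert_keras(model, model.name)
sess = onnxruntime.InferenceSession(onnx_model.SerializeToString())
onnx_out = sess.run(None, {k: v.numpy() for k, v in inputs.items()})

# Compare with the same tolerances the original tests use.
np.testing.assert_allclose(keras_out, onnx_out[0], rtol=1e-2, atol=1e-4)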
Example 11: make_model
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
               d_model=512, d_ff=2048, h=8, dropout=0.1):
    "Helper: Construct a model from hyperparameters."
    enc_config = BertConfig(vocab_size=1,
                            hidden_size=d_model,
                            num_hidden_layers=N_enc,
                            num_attention_heads=h,
                            intermediate_size=d_ff,
                            hidden_dropout_prob=dropout,
                            attention_probs_dropout_prob=dropout,
                            max_position_embeddings=1,
                            type_vocab_size=1)
    dec_config = BertConfig(vocab_size=tgt_vocab,
                            hidden_size=d_model,
                            num_hidden_layers=N_dec,
                            num_attention_heads=h,
                            intermediate_size=d_ff,
                            hidden_dropout_prob=dropout,
                            attention_probs_dropout_prob=dropout,
                            max_position_embeddings=17,
                            type_vocab_size=1,
                            is_decoder=True)
    encoder = BertModel(enc_config)

    def return_embeds(*args, **kwargs):
        return kwargs['inputs_embeds']

    # The encoder consumes precomputed feature vectors, so its embedding layer
    # is replaced with an identity over inputs_embeds (vocab_size=1 and
    # max_position_embeddings=1 above keep the unused tables tiny).
    del encoder.embeddings
    encoder.embeddings = return_embeds

    decoder = BertModel(dec_config)
    model = EncoderDecoder(
        encoder,
        decoder,
        Generator(d_model, tgt_vocab))
    return model
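One version caveat: in the transformers releases this code was written against, is_decoder=True was enough to give BertModel cross-attention over encoder states; from 4.x onward the config must also set add_cross_attention=True. A hedged sketch of the decoder config for recent versions, with placeholder hyperparameters matching make_model's defaults:

from transformers import BertConfig

tgt_vocab = 10000  # placeholder target vocabulary size

dec_config = BertConfig(vocab_size=tgt_vocab,
                        hidden_size=512,
                        num_hidden_layers=6,
                        num_attention_heads=8,
                        intermediate_size=2048,
                        max_position_embeddings=17,
                        type_vocab_size=1,
                        is_decoder=True,
                        add_cross_attention=True)  # required on transformers >= 4.x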
Example 12: __init__
# Module to import: import transformers [as alias]
# Or: from transformers import BertConfig [as alias]
def __init__(self, encoder, args, model_class, pretrained_model_name, max_pos=512,
             pretrained_config=None, temp_dir="./"):
    super(BertSumExt, self).__init__()
    self.loss = torch.nn.BCELoss(reduction='none')
    # self.device = device
    self.transformer = Transformer(temp_dir, model_class, pretrained_model_name, pretrained_config)

    if encoder == 'classifier':
        self.encoder = Classifier(self.transformer.model.config.hidden_size)
    elif encoder == 'transformer':
        self.encoder = ExtTransformerEncoder(self.transformer.model.config.hidden_size, args.ff_size,
                                             args.heads, args.dropout, args.inter_layers)
    elif encoder == 'rnn':
        self.encoder = RNNEncoder(bidirectional=True, num_layers=1,
                                  input_size=self.transformer.model.config.hidden_size,
                                  hidden_size=args.rnn_size, dropout=args.dropout)
    elif encoder == 'baseline':
        # Swap the pretrained backbone for a small, randomly initialized BERT.
        bert_config = BertConfig(self.transformer.model.config.vocab_size, hidden_size=args.hidden_size,
                                 num_hidden_layers=6, num_attention_heads=8, intermediate_size=args.ff_size)
        self.transformer.model = BertModel(bert_config)
        self.encoder = Classifier(self.transformer.model.config.hidden_size)

    self.max_pos = max_pos
    if max_pos > 512:
        # Grow the 512-slot position table: keep the learned rows and tile the
        # last learned row into every new position.
        my_pos_embeddings = nn.Embedding(self.max_pos, self.transformer.model.config.hidden_size)
        my_pos_embeddings.weight.data[:512] = self.transformer.model.embeddings.position_embeddings.weight.data
        my_pos_embeddings.weight.data[512:] = \
            self.transformer.model.embeddings.position_embeddings.weight.data[-1][None, :].repeat(self.max_pos - 512, 1)
        self.transformer.model.embeddings.position_embeddings = my_pos_embeddings

    if args.param_init != 0.0:
        for p in self.encoder.parameters():
            p.data.uniform_(-args.param_init, args.param_init)
    if args.param_init_glorot:
        for p in self.encoder.parameters():
            if p.dim() > 1:
                xavier_uniform_(p)
    # self.to(device)
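The max_pos branch above is the standard trick for pushing BERT past its 512-token position limit. Pulled out as a standalone helper for clarity (extend_position_embeddings is my name for it, not the repo's; on recent transformers versions you would also need to update config.max_position_embeddings and the registered position_ids buffer):

import torch.nn as nn

def extend_position_embeddings(model, max_pos):
    # Keep the 512 pretrained rows; initialize each new row from the last
    # pretrained one. The tiled rows are expected to be fine-tuned afterwards.
    old = model.embeddings.position_embeddings
    hidden_size = old.weight.size(1)
    new = nn.Embedding(max_pos, hidden_size)
    new.weight.data[:512] = old.weight.data
    new.weight.data[512:] = old.weight.data[-1][None, :].repeat(max_pos - 512, 1)
    model.embeddings.position_embeddings = new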