

Python modeling.BertModel Method Code Examples

This article collects typical usage examples of the Python method bert.modeling.BertModel. If you have been wondering what modeling.BertModel does, how to use it, or what calling it looks like in practice, the curated examples below should help. You can also explore further usage examples from the bert.modeling module.


The 15 code examples of modeling.BertModel below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
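Most of the examples below target the TensorFlow 1.x implementation from google-research/bert (Examples 1, 2, and 4 use PyTorch ports with the same class name). As a baseline, here is a minimal sketch of the canonical constructor; the config path and sequence length are illustrative assumptions rather than values taken from any example:

import tensorflow as tf
from bert import modeling

# Hypothetical config path and sequence length, for illustration only.
bert_config = modeling.BertConfig.from_json_file('bert_config.json')
input_ids = tf.placeholder(tf.int32, [None, 128])
input_mask = tf.placeholder(tf.int32, [None, 128])
segment_ids = tf.placeholder(tf.int32, [None, 128])

model = modeling.BertModel(
    config=bert_config,
    is_training=False,  # disables dropout inside the model
    input_ids=input_ids,
    input_mask=input_mask,
    token_type_ids=segment_ids,
    use_one_hot_embeddings=False)

pooled_output = model.get_pooled_output()      # [batch_size, hidden_size], [CLS] representation
sequence_output = model.get_sequence_output()  # [batch_size, seq_length, hidden_size]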

Example 1: get_bert

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def get_bert(BERT_PT_PATH, bert_type, do_lower_case, no_pretraining):
    bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
    vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
    init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')

    bert_config = BertConfig.from_json_file(bert_config_file)
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)
    bert_config.print_status()

    model_bert = BertModel(bert_config)
    if not no_pretraining:
        model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
        print("Loaded pre-trained parameters.")
    model_bert.to(device)

    return model_bert, tokenizer, bert_config 
Developer: naver | Project: sqlova | Lines: 25 | Source file: train_shallow_layer.py

Example 2: get_bert

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def get_bert(BERT_PT_PATH, bert_type, do_lower_case, no_pretraining):
    bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
    vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
    init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')

    bert_config = BertConfig.from_json_file(bert_config_file)
    tokenizer = tokenization.FullTokenizer(
        vocab_file=vocab_file, do_lower_case=do_lower_case)
    bert_config.print_status()

    model_bert = BertModel(bert_config)
    if not no_pretraining:
        model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
        print("Loaded pre-trained parameters.")
    model_bert.to(device)

    return model_bert, tokenizer, bert_config 
Developer: naver | Project: sqlova | Lines: 21 | Source file: train.py
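
Examples 1 and 2 are the same helper from the sqlova project; only the source file differs (train_shallow_layer.py vs. train.py). A hypothetical invocation is sketched below; the path and bert_type values are illustrative only, and device is assumed to be a torch.device defined elsewhere in the script, as in the original repository:

# Hypothetical call; adjust the path and bert_type to your checkpoint layout.
model_bert, tokenizer, bert_config = get_bert(
    BERT_PT_PATH='./data_and_model', bert_type='uncased_L-12_H-768_A-12',
    do_lower_case=True, no_pretraining=False)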

Example 3: get_bert_embeddings

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def get_bert_embeddings(input_ids,
                        bert_config,
                        input_mask=None,
                        token_type_ids=None,
                        is_training=False,
                        use_one_hot_embeddings=False,
                        scope=None):
  """Returns embeddings for BERT."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=token_type_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      scope=scope)
  return model.get_sequence_output() 
Developer: google-research | Project: language | Lines: 19 | Source file: bert_utils.py

Example 4: __init__

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def __init__(self, config, use_crf=False):
    super(BertForBIOAspectExtraction, self).__init__()
    self.bert = BertModel(config)
    self.use_crf = use_crf
    # TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
    # self.dropout = nn.Dropout(config.hidden_dropout_prob)
    self.affine = nn.Linear(config.hidden_size, 3)
    if self.use_crf:
        self.crf = ConditionalRandomField(3)

    def init_weights(module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=config.initializer_range)
        elif isinstance(module, BERTLayerNorm):
            module.beta.data.normal_(mean=0.0, std=config.initializer_range)
            module.gamma.data.normal_(mean=0.0, std=config.initializer_range)
        if isinstance(module, nn.Linear):
            module.bias.data.zero_()
    self.apply(init_weights)
Developer: huminghao16 | Project: SpanABSA | Lines: 23 | Source file: sentiment_modeling.py

Example 5: create_predict_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_predict_model(bert_config, input_ids, input_mask,
                 segment_ids, use_one_hot_embeddings):
  """Creates a classification model."""
  all_logits = []
  input_ids_shape = modeling.get_shape_list(input_ids, expected_rank=2)
  batch_size = input_ids_shape[0]
  seq_length = input_ids_shape[1]

  model = modeling.BertModel(
      config=bert_config,
      is_training=False,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      scope="bert")
  final_hidden = model.get_sequence_output()

  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  hidden_size = final_hidden_shape[2]

  output_weights = tf.get_variable(
      "cls/open_qa/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "cls/open_qa/output_bias", [2], initializer=tf.zeros_initializer())

  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)

  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])
  unstacked_logits = tf.unstack(logits, axis=0)
  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

  return (start_logits, end_logits) 
Developer: thunlp | Project: XQA | Lines: 40 | Source file: run_bert_open_qa_eval.py
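
The tail of this example is the standard SQuAD-style span head: the [batch_size, seq_length, hidden_size] hidden states are flattened to a matrix, projected to two logits per token, reshaped back, transposed to [2, batch_size, seq_length], and unstacked so that one slice scores each position as an answer start and the other as an answer end. Examples 10 and 14 below use the same construction.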

Example 6: make_bert_graph

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def make_bert_graph(bert_config, max_seq_length, dropout_keep_prob_rate, num_labels, tune=False):
    input_ids = tf.placeholder(tf.int32, [None, max_seq_length], name='inputs_ids')
    input_mask = tf.placeholder(tf.int32, [None, max_seq_length], name='input_mask')
    segment_ids = tf.placeholder(tf.int32, [None, max_seq_length], name='segment_ids')
    model = BertModel(config=bert_config,
                      is_training=tune,
                      input_ids=input_ids,
                      input_mask=input_mask,
                      token_type_ids=segment_ids)
    if tune:
        bert_embeddings_dropout = tf.nn.dropout(model.pooled_output, keep_prob=(1 - dropout_keep_prob_rate))
        label_ids = tf.placeholder(tf.int32, [None], name='label_ids')
    else:
        bert_embeddings_dropout = model.pooled_output
        label_ids = None
    logits = tf.contrib.layers.fully_connected(inputs=bert_embeddings_dropout,
                                               num_outputs=num_labels,
                                               activation_fn=None,
                                               weights_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                               biases_initializer=tf.zeros_initializer())
    if tune:
        # loss layer
        CE = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_ids, logits=logits)
        loss = tf.reduce_mean(CE)
        return input_ids, input_mask, segment_ids, label_ids, logits, loss
    else:
        # prob layer
        probs = tf.nn.softmax(logits, axis=-1, name='probs')
        return model, input_ids, input_mask, segment_ids, probs 
Developer: ratsgo | Project: embedding | Lines: 31 | Source file: tune_utils.py
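
A minimal inference sketch for this graph, assuming a TF 1.x session and NumPy arrays ids, mask, and segs of shape [batch_size, max_seq_length] produced by a tokenizer; these arrays and the initializer-instead-of-checkpoint step are assumptions, not part of the original example:

model, input_ids, input_mask, segment_ids, probs = make_bert_graph(
    bert_config, max_seq_length=128, dropout_keep_prob_rate=0.1,
    num_labels=2, tune=False)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # in practice, restore fine-tuned weights instead
    predictions = sess.run(probs, feed_dict={
        input_ids: ids, input_mask: mask, segment_ids: segs})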

Example 7: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask,
                 segment_ids, labels, num_labels, use_one_hot_embeddings):
    """
    创建X模型
    :param bert_config: bert 配置
    :param is_training:
    :param input_ids: 数据的idx 表示
    :param input_mask:
    :param segment_ids:
    :param labels: 标签的idx 表示
    :param num_labels: 类别数量
    :param use_one_hot_embeddings:
    :return:
    """
    # Load BertModel with the input data to obtain the corresponding character embeddings
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        use_one_hot_embeddings=use_one_hot_embeddings
    )
    # Get the corresponding embeddings, shape [batch_size, seq_length, embedding_size]
    embedding = model.get_sequence_output()
    max_seq_length = embedding.shape[1].value

    used = tf.sign(tf.abs(input_ids))
    lengths = tf.reduce_sum(used, reduction_indices=1)  # [batch_size] vector containing the sequence lengths in the current batch

    blstm_crf = BLSTM_CRF(embedded_chars=embedding, hidden_unit=FLAGS.lstm_size, cell_type=FLAGS.cell, num_layers=FLAGS.num_layers,
                          dropout_rate=FLAGS.droupout_rate, initializers=initializers, num_labels=num_labels,
                          seq_length=max_seq_length, labels=labels, lengths=lengths, is_training=is_training)
    rst = blstm_crf.add_blstm_crf_layer(crf_only=True)
    return rst 
Developer: WenRichard | Project: KBQA-BERT | Lines: 37 | Source file: run_ner.py

Example 8: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    per_example_loss = -tf.reduce_sum(labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities) 
Developer: google-research | Project: language | Lines: 43 | Source file: run_classifier_distillation.py

Example 9: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 gt_probs, num_labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # In the demo, we are doing a simple classification task on the entire
  # segment.
  #
  # If you want to use the token-level output, use model.get_sequence_output()
  # instead.
  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    if is_training:
      # I.e., 0.1 dropout
      output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    probabilities = tf.nn.softmax(logits, axis=-1)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    per_example_loss = -tf.reduce_sum(gt_probs * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)

    return (loss, per_example_loss, logits, probabilities) 
Developer: google-research | Project: language | Lines: 43 | Source file: run_bert_boolq_diff.py
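
In both Example 8 (run_classifier_distillation.py) and Example 9 (run_bert_boolq_diff.py), the target (labels or gt_probs) enters the loss as a full probability distribution rather than an integer class index, so -tf.reduce_sum(target * log_probs, axis=-1) is a soft cross-entropy. This is the usual setup for distillation, where a student model is trained against a teacher's output distribution instead of hard labels.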

Example 10: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  final_hidden = model.get_sequence_output()

  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]

  output_weights = tf.get_variable(
      "cls/squad/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)

  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])

  unstacked_logits = tf.unstack(logits, axis=0)

  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])

  return (start_logits, end_logits) 
Developer: google-research | Project: language | Lines: 40 | Source file: run_squad.py

Example 11: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 labels, use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  output_layer = model.get_pooled_output()

  hidden_size = output_layer.shape[-1].value

  num_labels = 2  # This is hardcoded for binary classification

  with tf.variable_scope("cls/seq_relationship"):
    output_weights = tf.get_variable(
        "output_weights",
        shape=[num_labels, hidden_size],
        initializer=modeling.create_initializer(bert_config.initializer_range))
    output_bias = tf.get_variable(
        "output_bias", shape=[num_labels], initializer=tf.zeros_initializer())

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    probabilities = tf.nn.softmax(logits, axis=-1)
    labels = tf.reshape(labels, [-1])
    one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities) 
Developer: google-research | Project: language | Lines: 36 | Source file: run_binary_coherence.py

Example 12: create_cpc_model_and_placeholders

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_cpc_model_and_placeholders(num_choices):
  """Build model and placeholders."""

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config)
  is_training = False
  use_one_hot_embeddings = False
  seq_length = 512

  Placeholders = namedtuple("Placeholders", [
      "input_ids", "input_mask", "segment_ids", "labels", "label_types",
      "softmax_mask"
  ])

  input_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  input_mask = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  labels = tf.placeholder(dtype=tf.int32, shape=[None, 8])
  label_types = tf.placeholder(dtype=tf.int32, shape=[None, 8])
  softmax_mask = tf.placeholder(dtype=tf.bool, shape=[None])
  placeholders = Placeholders(input_ids, input_mask, segment_ids, labels,
                              label_types, softmax_mask)

  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  logits, probabilities = create_cpc_model(model, num_choices, False)

  Model = namedtuple("Model", ["logits", "probabilities"])
  model = Model(logits, probabilities)

  return placeholders, model 
Developer: google-research | Project: language | Lines: 38 | Source file: discriminative_eval.py

Example 13: create_cpc_model_and_placeholders

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_cpc_model_and_placeholders(num_choices):
  """Build model and placeholders."""

  bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config)
  is_training = False
  use_one_hot_embeddings = False
  seq_length = 512

  Placeholders = namedtuple("Placeholders", [
      "input_ids", "input_mask", "segment_ids", "labels", "label_types",
      "softmax_mask"
  ])

  input_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  input_mask = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, seq_length])
  labels = tf.placeholder(dtype=tf.int32, shape=[None, 8])
  label_types = tf.placeholder(dtype=tf.int32, shape=[None, 8])
  softmax_mask = tf.placeholder(dtype=tf.bool, shape=[None])
  placeholders = Placeholders(input_ids, input_mask, segment_ids, labels,
                              label_types, softmax_mask)

  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings)

  logits, probabilities = create_cpc_model(model, num_choices, False,
                                           softmax_mask)

  Model = namedtuple("Model", ["logits", "probabilities"])
  model = Model(logits, probabilities)

  return placeholders, model 
Developer: google-research | Project: language | Lines: 39 | Source file: coherence_eval.py
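
Examples 12 and 13 differ only in the final call: Example 13 passes softmax_mask through to create_cpc_model, while Example 12 builds the placeholder but does not use it. In both, the Placeholders and Model namedtuples simply bundle the graph's input and output tensors so that callers can feed and fetch them by name.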

Example 14: create_model

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def create_model(bert_config, is_training, input_ids, input_mask,
                 use_one_hot_embeddings):
  """Creates a classification model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      use_one_hot_embeddings=use_one_hot_embeddings)

  # Get the logits for the start and end predictions.
  final_hidden = model.get_sequence_output()

  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]

  output_weights = tf.get_variable(
      "cls/nq/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))

  output_bias = tf.get_variable(
      "cls/nq/output_bias", [2], initializer=tf.zeros_initializer())

  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)

  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])

  unstacked_logits = tf.unstack(logits, axis=0)

  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
  return (start_logits, end_logits) 
Developer: google-research | Project: language | Lines: 39 | Source file: run_mrqa.py

Example 15: shared_qry_encoder_v2

# Required module import: from bert import modeling [as alias]
# Or: from bert.modeling import BertModel [as alias]
def shared_qry_encoder_v2(qry_input_ids, qry_input_mask, is_training,
                          use_one_hot_embeddings, bert_config, qa_config):
  """Embed query into a BOW and shared dense representation."""
  qry_model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=qry_input_ids,
      input_mask=qry_input_mask,
      use_one_hot_embeddings=use_one_hot_embeddings,
      scope="bert")
  qry_seq_emb, _ = _get_bert_embeddings(
      qry_model, [qa_config.qry_num_layers - 2], "concat", name="qry")
  word_emb_table = qry_model.get_embedding_table()

  return qry_seq_emb, word_emb_table 
Developer: google-research | Project: language | Lines: 17 | Source file: model_fns.py


Note: The bert.modeling.BertModel examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code; do not reproduce without permission.