當前位置: 首頁>>代碼示例>>Python>>正文


Python tokenization.FullTokenizer方法代碼示例

本文整理匯總了Python中tokenization.FullTokenizer方法的典型用法代碼示例。如果您正苦於以下問題:Python tokenization.FullTokenizer方法的具體用法?Python tokenization.FullTokenizer怎麼用?Python tokenization.FullTokenizer使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tokenization的用法示例。


在下文中一共展示了tokenization.FullTokenizer方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: test_full_tokenizer

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def test_full_tokenizer(self):
    """Round-trip a toy WordPiece vocab through FullTokenizer.

    Writes a temporary vocab file, tokenizes a string containing an
    accented character, and checks both the resulting tokens and their
    vocab ids.
    """
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
        # NamedTemporaryFile defaults to binary mode ('w+b'); writing a
        # str here raises TypeError on Python 3, so encode explicitly.
        vocab_writer.write("".join(
            [x + "\n" for x in vocab_tokens]).encode("utf-8"))

        vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)  # vocab is loaded into memory; temp file no longer needed

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
開發者ID:Socialbird-AILab,項目名稱:BERT-Classification-Tutorial,代碼行數:20,代碼來源:tokenization_test.py

示例2: test_full_tokenizer

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def test_full_tokenizer(self):
    """Verify FullTokenizer tokens and ids against a small test vocab."""
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    vocab_contents = "".join([token + "\n" for token in vocab_tokens])
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
        if six.PY2:
            vocab_writer.write(vocab_contents)
        else:
            # Python 3 opens the temp file in binary mode, so bytes are required.
            vocab_writer.write(vocab_contents.encode("utf-8"))

        vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)  # tokenizer has read the vocab; clean up

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
開發者ID:Nagakiran1,項目名稱:Extending-Google-BERT-as-Question-and-Answering-model-and-Chatbot,代碼行數:24,代碼來源:tokenization_test.py

示例3: test_full_tokenizer

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def test_full_tokenizer(self):
    """Exercise FullTokenizer end to end on a tiny WordPiece vocab.

    Checks tokenization of an accented/unsplit input string and the
    mapping of the produced tokens back to vocab ids.
    """
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
        # The temp file is opened in binary mode by default; a plain str
        # write fails with TypeError on Python 3. Encode to UTF-8 first.
        vocab_writer.write("".join(
            [x + "\n" for x in vocab_tokens]).encode("utf-8"))

        vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)  # safe: the vocab has already been loaded

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
開發者ID:yyht,項目名稱:BERT,代碼行數:20,代碼來源:tokenization_test.py

示例4: test_full_tokenizer

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def test_full_tokenizer(self):
    """Check that FullTokenizer splits and id-maps a sample string correctly."""
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
        "runn", "##ing", ","
    ]
    vocab_data = "".join(["%s\n" % token for token in vocab_tokens])
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
        # Binary-mode temp file: Python 3 needs bytes, Python 2 takes str.
        vocab_writer.write(
            vocab_data if six.PY2 else vocab_data.encode("utf-8"))

        vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)  # remove the temp vocab once loaded

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    expected_tokens = ["un", "##want", "##ed", ",", "runn", "##ing"]
    self.assertAllEqual(tokens, expected_tokens)

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
開發者ID:guoyaohua,項目名稱:BERT-Chinese-Annotation,代碼行數:25,代碼來源:tokenization_test.py

示例5: __init__

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def __init__(self, batch_size=32):
    """
    init BertVector
    :param batch_size:     Depending on your memory default is 32
    """
    self.max_seq_length = args.max_seq_len
    self.layer_indexes = args.layer_indexes
    self.gpu_memory_fraction = 1

    # Reuse a previously optimized graph from disk when available;
    # otherwise build (and persist) one now.
    if os.path.exists(args.graph_file):
        self.graph_path = args.graph_file
    else:
        self.graph_path = optimize_graph()

    self.tokenizer = tokenization.FullTokenizer(
        vocab_file=args.vocab_file, do_lower_case=True)
    self.batch_size = batch_size
    self.estimator = self.get_estimator()

    # Single-slot queues hand one batch at a time to/from the
    # background prediction thread.
    self.input_queue = Queue(maxsize=1)
    self.output_queue = Queue(maxsize=1)
    self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
    self.predict_thread.start()
開發者ID:terrifyzhao,項目名稱:bert-utils,代碼行數:22,代碼來源:extract_feature.py

示例6: test_full_tokenizer

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def test_full_tokenizer(self):
    """Tokenize a sample string with FullTokenizer and verify the ids.

    Builds a throwaway vocab file, tokenizes u"UNwant\\u00E9d,running",
    and asserts both the WordPiece tokens and their vocab indices.
    """
    vocab_tokens = [
        "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
        "##ing", ","
    ]
    with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
        # NamedTemporaryFile is binary by default; writing str raises
        # TypeError under Python 3, so encode the vocab explicitly.
        vocab_writer.write("".join(
            [x + "\n" for x in vocab_tokens]).encode("utf-8"))

        vocab_file = vocab_writer.name

    tokenizer = tokenization.FullTokenizer(vocab_file)
    os.unlink(vocab_file)  # vocab already read into the tokenizer

    tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
    self.assertAllEqual(
        tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

    self.assertAllEqual(
        tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
開發者ID:chainer,項目名稱:models,代碼行數:21,代碼來源:tokenization_test.py

示例7: main

# 需要導入模塊: import tokenization [as 別名]
# 或者: from tokenization import FullTokenizer [as 別名]
def main(_):
    """Convert a queries/run/qrels dataset into TFRecord files.

    Loads the BERT tokenizer and the document collection, merges the
    inputs, and writes the converted dataset to FLAGS.output_folder.
    """
    print('Loading Tokenizer...')
    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab, do_lower_case=True)

    if not os.path.exists(FLAGS.output_folder):
        os.mkdir(FLAGS.output_folder)

    # qrels are optional (e.g. absent for a blind evaluation set).
    qrels = load_qrels(path=FLAGS.qrels) if FLAGS.qrels else None

    queries = load_queries(path=FLAGS.queries)
    run = load_run(path=FLAGS.run)
    data = merge(qrels=qrels, run=run, queries=queries)

    print('Loading Collection...')
    collection = load_collection(FLAGS.collection_path)

    print('Converting to TFRecord...')
    convert_dataset(data=data, collection=collection, tokenizer=tokenizer)

    print('Done!')
開發者ID:nyu-dl,項目名稱:dl4ir-doc2query,代碼行數:25,代碼來源:convert_msmarco_to_tfrecord.py


注:本文中的tokenization.FullTokenizer方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。