This article collects typical usage examples of the tokenization.FullTokenizer method in Python. If you are wondering what tokenization.FullTokenizer does, how to call it, or want to see it used in context, the curated code examples below may help. You can also explore further usage examples from the tokenization module in which the method is defined.
The following shows 7 code examples of tokenization.FullTokenizer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
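Before turning to the examples, here is a minimal sketch of the typical FullTokenizer workflow (the vocabulary path is illustrative; the snippet assumes the BERT-style tokenization module used throughout the examples below):

import tokenization

# Path to a WordPiece vocabulary file, one token per line (illustrative path).
vocab_file = "vocab.txt"

# FullTokenizer chains basic tokenization (lowercasing, punctuation splitting) with WordPiece.
tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=True)

tokens = tokenizer.tokenize("unwanted, running")      # e.g. ["un", "##want", "##ed", ",", "runn", "##ing"]
token_ids = tokenizer.convert_tokens_to_ids(tokens)   # integer ids into the vocabulary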
Example 1: test_full_tokenizer
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def test_full_tokenizer(self):
  # Build a tiny WordPiece vocabulary and write it to a temporary file.
  vocab_tokens = [
      "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
      "##ing", ","
  ]
  with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
    # Note: on Python 3 the joined string must be encoded to bytes first,
    # as done in Example 2 below.
    vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    vocab_file = vocab_writer.name

  tokenizer = tokenization.FullTokenizer(vocab_file)
  os.unlink(vocab_file)

  # The accented input is lowercased, stripped of accents, and split into WordPieces.
  tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
  self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

  # The ids are simply the indexes of the tokens in vocab_tokens above.
  self.assertAllEqual(
      tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
Example 2: test_full_tokenizer
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def test_full_tokenizer(self):
  vocab_tokens = [
      "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
      "##ing", ","
  ]
  with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
    if six.PY2:
      vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    else:
      vocab_writer.write("".join(
          [x + "\n" for x in vocab_tokens]).encode("utf-8"))

    vocab_file = vocab_writer.name

  tokenizer = tokenization.FullTokenizer(vocab_file)
  os.unlink(vocab_file)

  tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
  self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

  self.assertAllEqual(
      tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
Developer: Nagakiran1, Project: Extending-Google-BERT-as-Question-and-Answering-model-and-Chatbot, Lines of code: 24, Source file: tokenization_test.py
Example 3: test_full_tokenizer
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def test_full_tokenizer(self):
  vocab_tokens = [
      "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
      "##ing", ","
  ]
  with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
    vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    vocab_file = vocab_writer.name

  tokenizer = tokenization.FullTokenizer(vocab_file)
  os.unlink(vocab_file)

  tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
  self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

  self.assertAllEqual(
      tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
Example 4: test_full_tokenizer
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def test_full_tokenizer(self):
  vocab_tokens = [
      "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un",
      "runn", "##ing", ","
  ]
  with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
    if six.PY2:
      vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    else:
      vocab_writer.write("".join(
          [x + "\n" for x in vocab_tokens]).encode("utf-8"))

    vocab_file = vocab_writer.name

  tokenizer = tokenization.FullTokenizer(vocab_file)
  os.unlink(vocab_file)

  tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
  self.assertAllEqual(tokens,
                      ["un", "##want", "##ed", ",", "runn", "##ing"])

  self.assertAllEqual(
      tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
Example 5: __init__
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def __init__(self, batch_size=32):
  """
  Initialize BertVector.
  :param batch_size: batch size for prediction; the default of 32 can be lowered if memory is limited
  """
  self.max_seq_length = args.max_seq_len
  self.layer_indexes = args.layer_indexes
  self.gpu_memory_fraction = 1
  if os.path.exists(args.graph_file):
    self.graph_path = args.graph_file
  else:
    self.graph_path = optimize_graph()
  self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
  self.batch_size = batch_size
  self.estimator = self.get_estimator()
  self.input_queue = Queue(maxsize=1)
  self.output_queue = Queue(maxsize=1)
  self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
  self.predict_thread.start()
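The constructor above routes prediction through a pair of bounded queues served by a daemon thread, so callers can submit a batch and block on its result without rebuilding the estimator per request. A self-contained sketch of that producer/consumer pattern follows; the fake_model and encode names are illustrative stand-ins, not part of the original class:

from queue import Queue
from threading import Thread

input_queue = Queue(maxsize=1)   # caller -> worker
output_queue = Queue(maxsize=1)  # worker -> caller

def fake_model(batch):
  # Stand-in for the estimator's predict() generator: one "vector" per sentence.
  return [[float(len(s))] for s in batch]

def predict_from_queue():
  # Runs forever in a daemon thread, mirroring predict_thread above.
  while True:
    batch = input_queue.get()
    output_queue.put(fake_model(batch))

Thread(target=predict_from_queue, daemon=True).start()

def encode(sentences):
  # Blocking call from the main thread: enqueue a batch, wait for its result.
  input_queue.put(sentences)
  return output_queue.get()

print(encode(["hello world", "bert vectors"]))  # [[11.0], [12.0]]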
Example 6: test_full_tokenizer
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def test_full_tokenizer(self):
  vocab_tokens = [
      "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
      "##ing", ","
  ]
  with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
    vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    vocab_file = vocab_writer.name

  tokenizer = tokenization.FullTokenizer(vocab_file)
  os.unlink(vocab_file)

  tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
  self.assertAllEqual(
      tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])

  self.assertAllEqual(
      tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
Example 7: main
# Required import: import tokenization [as alias]
# Or: from tokenization import FullTokenizer [as alias]
def main(_):
  print('Loading Tokenizer...')
  tokenizer = tokenization.FullTokenizer(
      vocab_file=FLAGS.vocab, do_lower_case=True)

  if not os.path.exists(FLAGS.output_folder):
    os.mkdir(FLAGS.output_folder)

  qrels = None
  if FLAGS.qrels:
    qrels = load_qrels(path=FLAGS.qrels)

  queries = load_queries(path=FLAGS.queries)
  run = load_run(path=FLAGS.run)
  data = merge(qrels=qrels, run=run, queries=queries)

  print('Loading Collection...')
  collection = load_collection(FLAGS.collection_path)

  print('Converting to TFRecord...')
  convert_dataset(data=data, collection=collection, tokenizer=tokenizer)

  print('Done!')
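The helpers used above (load_qrels, load_queries, merge, convert_dataset, and so on) are specific to that project, but the core step, tokenizing text with FullTokenizer and serializing the ids into a TFRecord, can be sketched roughly as follows. The function name, feature name, and file paths here are illustrative assumptions, and padding and segment-id handling is omitted:

import tensorflow as tf
import tokenization

def write_example(writer, tokenizer, query, passage, max_seq_len=128):
  # Tokenize query and passage with WordPiece and map the tokens to vocab ids.
  tokens = ["[CLS]"] + tokenizer.tokenize(query) + ["[SEP]"]
  tokens += tokenizer.tokenize(passage) + ["[SEP]"]
  ids = tokenizer.convert_tokens_to_ids(tokens)[:max_seq_len]

  # Serialize the ids as a tf.train.Example and append it to the TFRecord file.
  features = tf.train.Features(feature={
      "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=ids)),
  })
  writer.write(tf.train.Example(features=features).SerializeToString())

tokenizer = tokenization.FullTokenizer(vocab_file="vocab.txt", do_lower_case=True)  # illustrative path
with tf.io.TFRecordWriter("dataset.tfrecord") as writer:                            # illustrative path
  write_example(writer, tokenizer, "what is bert", "BERT is a language model.")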