本文整理汇总了Python中dragnn.python.sentence_io.ConllSentenceReader方法的典型用法代码示例。如果您正苦于以下问题:Python sentence_io.ConllSentenceReader方法的具体用法?Python sentence_io.ConllSentenceReader怎么用?Python sentence_io.ConllSentenceReader使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类dragnn.python.sentence_io的用法示例。
在下文中一共展示了sentence_io.ConllSentenceReader方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testReadFirstSentence
# Required import: from dragnn.python import sentence_io [as alias]
# Or: from dragnn.python.sentence_io import ConllSentenceReader [as alias]
def testReadFirstSentence(self):
reader = sentence_io.ConllSentenceReader(self.filepath, 1)
sentences, last = reader.read()
self.assertEqual(1, len(sentences))
pb = sentence_pb2.Sentence()
pb.ParseFromString(sentences[0])
self.assertFalse(last)
self.assertEqual(
u'I knew I could do it properly if given the right kind of support .',
pb.text)
示例2: testReadFromTextFile
# Required import: from dragnn.python import sentence_io [as alias]
# Or: from dragnn.python.sentence_io import ConllSentenceReader [as alias]
def testReadFromTextFile(self):
reader = sentence_io.ConllSentenceReader(self.filepath, self.batch_size)
self.assertParseable(reader, self.batch_size, False)
self.assertParseable(reader, self.batch_size, False)
self.assertParseable(reader, 14, True)
self.assertParseable(reader, 0, True)
self.assertParseable(reader, 0, True)
示例3: testReadAndProjectivize
# Required import: from dragnn.python import sentence_io [as alias]
# Or: from dragnn.python.sentence_io import ConllSentenceReader [as alias]
def testReadAndProjectivize(self):
reader = sentence_io.ConllSentenceReader(
self.filepath, self.batch_size, projectivize=True)
self.assertParseable(reader, self.batch_size, False)
self.assertParseable(reader, self.batch_size, False)
self.assertParseable(reader, 14, True)
self.assertParseable(reader, 0, True)
self.assertParseable(reader, 0, True)
示例4: get_segmenter_corpus
# Required import: from dragnn.python import sentence_io [as alias]
# Or: from dragnn.python.sentence_io import ConllSentenceReader [as alias]
def get_segmenter_corpus(input_data_path, use_text_format):
"""Reads in a character corpus for segmenting."""
# Read in the documents.
tf.logging.info('Reading documents...')
if use_text_format:
char_corpus = sentence_io.FormatSentenceReader(input_data_path,
'untokenized-text').corpus()
else:
input_corpus = sentence_io.ConllSentenceReader(input_data_path).corpus()
with tf.Session(graph=tf.Graph()) as tmp_session:
char_input = gen_parser_ops.char_token_generator(input_corpus)
char_corpus = tmp_session.run(char_input)
check.Eq(len(input_corpus), len(char_corpus))
return char_corpus