This article collects typical usage examples of the Python method preprocessor.Preprocessor.clearTokenCache. If you are wondering what Preprocessor.clearTokenCache does, how to call it, or want to see it used in real code, the curated example below may help. You can also read more about the containing class, preprocessor.Preprocessor.
The following shows the 1 code example of the Preprocessor.clearTokenCache method that was found, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: analyze
# Required import: from preprocessor import Preprocessor [as alias]
# Alternatively: from preprocessor.Preprocessor import clearTokenCache [as alias]
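# Also assumed by this example (not shown in the original snippet):
#   from multiprocessing import Process, Queue
#   import datetime
#   plus the project's DatabaseManager, Parser, analyzer_worker,
#   preprocessor_worker and parser_worker definitions.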
def analyze(snd_pipe, db_path, pp_cfg, parser_cfg, srcFiles, use_pipeline=False, analyzer_process=1, pp_process=1, parser_process=1):
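    """Preprocess and parse srcFiles in worker processes, report progress
    over snd_pipe, and store the collected results in a database at db_path."""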
    db = DatabaseManager()
    pp_list = [Preprocessor(**pp_cfg) for _ in range(pp_process if use_pipeline else analyzer_process)]
    parser_list = [Parser(**parser_cfg) for _ in range(parser_process if use_pipeline else analyzer_process)]
    numFiles = len(srcFiles)
    t_0 = datetime.datetime.now()
    projInfo = {}
    projInfo['predefined'] = pp_list[0].preprocess_predef()
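    # task_queue feeds source files to the workers; done_queue collects per-file results.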
    task_queue = Queue()
    done_queue = Queue()
    for srcFile in srcFiles:
        task_queue.put(srcFile)
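    # One 'STOP' sentinel per preprocessor so every worker eventually exits.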
    for _ in range(len(pp_list)):
        task_queue.put('STOP')
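    # Non-pipeline mode: each worker process preprocesses and parses files end to end.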
    if not use_pipeline:
        analyzer_p_list = [Process(target=analyzer_worker, args=(pp, parser, task_queue, done_queue))
                           for pp, parser in zip(pp_list, parser_list)]
        for analyzer_p in analyzer_p_list:
            analyzer_p.start()
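        # Collect one result per source file and report progress over snd_pipe.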
        for i, srcFile in enumerate(srcFiles):
            # print('analyze: [%d/%d] %s' % (i, numFiles, srcFile))
            projInfo[srcFile] = done_queue.get()
            snd_pipe.send((i, numFiles, srcFile))
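            # Any message arriving on the pipe is treated as a cancel request.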
            if snd_pipe.poll():
                for analyzer_p in analyzer_p_list:
                    analyzer_p.terminate()
                for analyzer_p in analyzer_p_list:
                    analyzer_p.join()
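                # Clear the class-level token cache before bailing out.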
                Preprocessor.clearTokenCache()
                snd_pipe.send('STOPPED')
                print('analyze: canceled')
                return
        for analyzer_p in analyzer_p_list:
            analyzer_p.join()
    else:
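        # Pipeline mode: preprocessor processes feed parser processes through pp_queue.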
        pp_queue = Queue()
        pp_p_list = [Process(target=preprocessor_worker, args=(pp, task_queue, pp_queue))
                     for pp in pp_list]
        for pp_p in pp_p_list:
            pp_p.start()
        parser_p_list = [Process(target=parser_worker, args=(parser, pp_queue, done_queue))
                         for parser in parser_list]
        for parser_p in parser_p_list:
            parser_p.start()
        for i, srcFile in enumerate(srcFiles):
            # print('analyze: [%d/%d] %s' % (i, numFiles, srcFile))
            projInfo[srcFile] = done_queue.get()
            snd_pipe.send((i, numFiles, srcFile))
            if snd_pipe.poll():
                for pp_p in pp_p_list:
                    pp_p.terminate()
                for parser_p in parser_p_list:
                    parser_p.terminate()
                for pp_p in pp_p_list:
                    pp_p.join()
                for parser_p in parser_p_list:
                    parser_p.join()
                Preprocessor.clearTokenCache()
                snd_pipe.send('STOPPED')
                print('analyze: canceled')
                return
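        # All files processed; tell the parser processes to shut down as well.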
        for _ in range(len(parser_p_list)):
            pp_queue.put('STOP')
        for pp_p in pp_p_list:
            pp_p.join()
        for parser_p in parser_p_list:
            parser_p.join()
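    # All workers have exited; record the elapsed time and persist the results.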
    t_1 = datetime.datetime.now()
    db.createDB(db_path)
    db.addData(projInfo)
    db.saveDB()
    db.closeDB()
    print('analyze: done', t_1 - t_0)
    snd_pipe.send((numFiles, numFiles, 'Generating Database ... done'))
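For context, here is a minimal, hypothetical sketch of the driver that would sit on the other end of snd_pipe. The module name analysis, the config dicts, the database path, and the file list are all placeholder assumptions; only the pipe protocol (per-file progress tuples, a cancel message, and the 'STOPPED' acknowledgment) is taken from the example above.

# Hypothetical driver; names below are assumptions, not from the original project.
from multiprocessing import Pipe, Process

from analysis import analyze  # assumed location of the analyze() function above

if __name__ == '__main__':
    rcv_pipe, snd_pipe = Pipe()
    worker = Process(target=analyze,
                     args=(snd_pipe, 'proj.db', {}, {}, ['main.c', 'util.c']))
    worker.start()
    while True:
        msg = rcv_pipe.recv()
        if msg == 'STOPPED':        # analyze() acknowledged a cancel request
            break
        i, total, info = msg        # progress tuple sent for each analyzed file
        print('[%d/%d] %s' % (i, total, info))
        if i == total:              # final 'Generating Database ... done' message
            break
    worker.join()

To cancel a run, the driver can send any object back over rcv_pipe: since the pipe is duplex, analyze() sees it via snd_pipe.poll(), terminates its workers, clears the token cache with Preprocessor.clearTokenCache(), and replies 'STOPPED'.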