本文整理汇总了Python中jieba.set_dictionary方法的典型用法代码示例。如果您正苦于以下问题:Python jieba.set_dictionary方法的具体用法?Python jieba.set_dictionary怎么用?Python jieba.set_dictionary使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块jieba的用法示例。
在下文中一共展示了jieba.set_dictionary方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: segment_trans
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def segment_trans(vocab_file, text_file):
    """Segment transcripts according to a vocab using jieba's
    dictionary-based Maximum Matching segmentation (HMM disabled).

    Args:
        vocab_file: path to the vocab file loaded as jieba's dictionary.
        text_file: path to the transcripts file, one utterance per line.

    Returns:
        A single string: each input line segmented into space-separated
        words, lines joined by '\\n' (with a trailing newline when the
        file is non-empty), matching the original accumulation behavior.
    """
    jieba.set_dictionary(vocab_file)
    seg_lines = []
    with open(text_file, "r", encoding="utf-8") as text:
        # Stream the file line by line instead of readlines(): avoids
        # holding the whole transcript in memory at once.
        for line in text:
            seg_lines.append(' '.join(jieba.cut(line.strip(), HMM=False)))
    # join() replaces the original quadratic `sents += seg + '\n'` loop;
    # the trailing '\n' reproduces its exact output.
    return '\n'.join(seg_lines) + '\n' if seg_lines else ''
示例2: testSetDictionary
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def testSetDictionary(self):
    """After installing a custom dictionary file, jieba.cut must still
    yield a generator that materializes into a list for every test
    sentence; tokens are echoed to stderr for inspection."""
    jieba.set_dictionary("foobar.txt")
    for content in test_contents:
        tokens = jieba.cut(content)
        assert isinstance(tokens, types.GeneratorType), "Test SetDictionary Generator error"
        tokens = list(tokens)
        assert isinstance(tokens, list), "Test SetDictionary error on content: %s" % content
        print(" , ".join(tokens), file=sys.stderr)
    print("testSetDictionary", file=sys.stderr)
示例3: main
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def main():
    """Segment a Traditional-Chinese wiki dump with jieba.

    Reads 'wiki_zh_tw.txt' line by line, cuts each line into words
    (accurate mode), drops stopwords, and writes the space-separated
    result to 'wiki_seg.txt'. Progress is logged every 10000 lines.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # jieba custom setting.
    jieba.set_dictionary('jieba_dict/dict.txt.big')
    # load stopwords set
    stopword_set = set()
    with io.open('jieba_dict/stopwords.txt', 'r', encoding='utf-8') as stopwords:
        for stopword in stopwords:
            stopword_set.add(stopword.strip('\n'))
    # Context managers guarantee both files are closed even if
    # segmentation raises (the original leaked `output` on error).
    with io.open('wiki_seg.txt', 'w', encoding='utf-8') as output, \
            io.open('wiki_zh_tw.txt', 'r', encoding='utf-8') as content:
        for texts_num, line in enumerate(content):
            line = line.strip('\n')
            words = jieba.cut(line, cut_all=False)
            for word in words:
                if word not in stopword_set:
                    output.write(word + ' ')
            output.write('\n')
            if (texts_num + 1) % 10000 == 0:
                logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
示例4: main
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def main():
    """Segment a Traditional-Chinese wiki dump with jieba.

    Reads 'wiki_zh_tw.txt' line by line, cuts each line into words
    (accurate mode), drops stopwords, and writes the space-separated
    result to 'wiki_seg.txt'. Progress is logged every 10000 lines.
    """
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # jieba custom setting.
    jieba.set_dictionary('jieba_dict/dict.txt.big')
    # load stopwords set
    stopword_set = set()
    with open('jieba_dict/stopwords.txt', 'r', encoding='utf-8') as stopwords:
        for stopword in stopwords:
            stopword_set.add(stopword.strip('\n'))
    # Context managers guarantee both files are closed even if
    # segmentation raises (the original leaked `output` on error).
    with open('wiki_seg.txt', 'w', encoding='utf-8') as output, \
            open('wiki_zh_tw.txt', 'r', encoding='utf-8') as content:
        for texts_num, line in enumerate(content):
            line = line.strip('\n')
            words = jieba.cut(line, cut_all=False)
            for word in words:
                if word not in stopword_set:
                    output.write(word + ' ')
            output.write('\n')
            if (texts_num + 1) % 10000 == 0:
                logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
示例5: init_jieba
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def init_jieba(self, seg_dic, userdic):
    """Configure jieba with a main dictionary and a user dictionary.

    Args:
        seg_dic: path to the main segmentation dictionary.
        userdic: path to the user dictionary; every word in it is also
            pinned via suggest_freq so it is kept as a single token.
    """
    jieba.load_userdict(userdic)
    jieba.set_dictionary(seg_dic)
    # Renamed from the original `input`, which shadowed the builtin.
    with open(userdic, 'r', encoding='utf-8') as dic_file:
        for word in dic_file:
            jieba.suggest_freq(word.strip('\n'), True)
示例6: jiebaCustomSetting
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def jiebaCustomSetting(self, dict_path, usr_dict_path):
    """Point jieba at a custom main dictionary, then register every
    word found in the user dictionary file."""
    jieba.set_dictionary(dict_path)
    with open(usr_dict_path, 'r', encoding='utf-8') as dic:
        for entry in dic:
            jieba.add_word(entry.strip('\n'))
示例7: setUp
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def setUp(self):
    """Load the Chinese word-segmentation dictionary before each test."""
    jieba.set_dictionary(APP_RESOURCES_DATA_PATH + 'jieba.dict')
示例8: set_default_dict
# 需要导入模块: import jieba [as 别名]
# 或者: from jieba import set_dictionary [as 别名]
def set_default_dict(tokenizer, path_default_dict):
    """Install `path_default_dict` as the tokenizer's main dictionary,
    announce the change on stdout, and hand the tokenizer back so the
    call can be chained."""
    announcement = "Setting Jieba Default Dictionary at " + str(path_default_dict)
    print(announcement)
    tokenizer.set_dictionary(path_default_dict)
    return tokenizer