當前位置: 首頁>>代碼示例>>Python>>正文


Python jieba.set_dictionary方法代碼示例

本文整理匯總了Python中jieba.set_dictionary方法的典型用法代碼示例。如果您正苦於以下問題:Python jieba.set_dictionary方法的具體用法?Python jieba.set_dictionary怎麼用?Python jieba.set_dictionary使用的例子?那麼,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在jieba的用法示例。


在下文中一共展示了jieba.set_dictionary方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: segment_trans

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def segment_trans(vocab_file, text_file):
    '''Segment transcripts according to vocab
        using Maximum Matching Algorithm (jieba with HMM disabled)
    Args:
      vocab_file: vocab file used as jieba's dictionary
      text_file: transcripts file (utf-8, one sentence per line)
    Returns:
      seg_trans: segmented words, one input line per output line,
        words separated by a single space; '' for an empty file
    '''
    jieba.set_dictionary(vocab_file)
    segmented_lines = []
    with open(text_file, "r", encoding="utf-8") as text:
        # Iterate lazily instead of readlines(); collect pieces and join once
        # at the end instead of the original quadratic `sents += ...` loop.
        for line in text:
            # HMM=False => pure dictionary-based Maximum Matching, no OOV discovery.
            segmented_lines.append(' '.join(jieba.cut(line.strip(), HMM=False)))
    # Original appended '\n' after every line, so keep a trailing newline.
    return '\n'.join(segmented_lines) + '\n' if segmented_lines else ''
開發者ID:athena-team,項目名稱:athena,代碼行數:20,代碼來源:segment_word.py

示例2: testSetDictionary

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def testSetDictionary(self):
    """Swap in a custom dictionary file, then check that jieba.cut still
    returns a generator that materializes into a list for every test sentence."""
    jieba.set_dictionary("foobar.txt")
    for content in test_contents:
        tokens = jieba.cut(content)
        # cut() must stay lazy even after the dictionary swap.
        assert isinstance(tokens, types.GeneratorType), "Test SetDictionary Generator error"
        tokens = list(tokens)
        assert isinstance(tokens, list), "Test SetDictionary error on content: %s" % content
        print(" , ".join(tokens), file=sys.stderr)
    print("testSetDictionary", file=sys.stderr)
開發者ID:deepcs233,項目名稱:jieba_fast,代碼行數:11,代碼來源:jieba_test.py

示例3: main

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def main():
    """Word-segment wiki_zh_tw.txt line by line with jieba, drop stopwords,
    and write the space-separated tokens to wiki_seg.txt (one line in, one line out)."""

    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    # jieba custom setting.
    jieba.set_dictionary('jieba_dict/dict.txt.big')

    # load stopwords set
    stopword_set = set()
    with io.open('jieba_dict/stopwords.txt', 'r', encoding='utf-8') as stopwords:
        for stopword in stopwords:
            stopword_set.add(stopword.strip('\n'))

    # Open the output inside a `with` as well: the original only closed it at
    # the end, so an exception during segmentation leaked the handle and could
    # lose buffered output.
    with io.open('wiki_seg.txt', 'w', encoding='utf-8') as output, \
         io.open('wiki_zh_tw.txt', 'r', encoding='utf-8') as content:
        for texts_num, line in enumerate(content):
            line = line.strip('\n')
            for word in jieba.cut(line, cut_all=False):
                if word not in stopword_set:
                    # Keeps the original trailing space before each newline.
                    output.write(word + ' ')
            output.write('\n')

            # Progress log every 10000 lines.
            if (texts_num + 1) % 10000 == 0:
                logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
開發者ID:zake7749,項目名稱:word2vec-tutorial,代碼行數:28,代碼來源:segment.py

示例4: main

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def main():
    """Word-segment wiki_zh_tw.txt line by line with jieba, drop stopwords,
    and write the space-separated tokens to wiki_seg.txt (one line in, one line out)."""

    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

    # jieba custom setting.
    jieba.set_dictionary('jieba_dict/dict.txt.big')

    # load stopwords set
    stopword_set = set()
    with open('jieba_dict/stopwords.txt', 'r', encoding='utf-8') as stopwords:
        for stopword in stopwords:
            stopword_set.add(stopword.strip('\n'))

    # Open the output inside a `with` as well: the original only closed it at
    # the end, so an exception during segmentation leaked the handle and could
    # lose buffered output.
    with open('wiki_seg.txt', 'w', encoding='utf-8') as output, \
         open('wiki_zh_tw.txt', 'r', encoding='utf-8') as content:
        for texts_num, line in enumerate(content):
            line = line.strip('\n')
            for word in jieba.cut(line, cut_all=False):
                if word not in stopword_set:
                    # Keeps the original trailing space before each newline.
                    output.write(word + ' ')
            output.write('\n')

            # Progress log every 10000 lines.
            if (texts_num + 1) % 10000 == 0:
                logging.info("已完成前 %d 行的斷詞" % (texts_num + 1))
開發者ID:zake7749,項目名稱:word2vec-tutorial,代碼行數:28,代碼來源:segment.py

示例5: init_jieba

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def init_jieba(self, seg_dic, userdic):

    """
    jieba custom setting: load the user dictionary, point jieba at the main
    segmentation dictionary, then force-accept every user word.

    Args:
        seg_dic: path to the main segmentation dictionary file.
        userdic: path to the user dictionary (utf-8, one word per line).
    """

    jieba.load_userdict(userdic)
    jieba.set_dictionary(seg_dic)
    # Renamed from `input`, which shadowed the builtin of the same name.
    with open(userdic, 'r', encoding='utf-8') as user_words:
        for word in user_words:
            word = word.strip('\n')
            # suggest_freq(..., True) tunes the word's frequency so jieba
            # will not split it apart during segmentation.
            jieba.suggest_freq(word, True)
開發者ID:zake7749,項目名稱:Chatbot,代碼行數:14,代碼來源:console.py

示例6: jiebaCustomSetting

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def jiebaCustomSetting(self, dict_path, usr_dict_path):
    """Point jieba at a custom main dictionary, then register every word
    from the user dictionary file (utf-8, one word per line)."""
    jieba.set_dictionary(dict_path)
    with open(usr_dict_path, 'r', encoding='utf-8') as user_dict:
        for entry in user_dict:
            jieba.add_word(entry.strip('\n'))
開發者ID:zake7749,項目名稱:Chatbot,代碼行數:8,代碼來源:matcher.py

示例7: setUp

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def setUp(self):
        """Per-test fixture: point jieba at the app's bundled dictionary."""
        jieba.set_dictionary(APP_RESOURCES_DATA_PATH + 'jieba.dict')  # set the Chinese word-segmentation dictionary
開發者ID:tenstone,項目名稱:kim-voice-assistant,代碼行數:4,代碼來源:plugins_test.py

示例8: set_default_dict

# 需要導入模塊: import jieba [as 別名]
# 或者: from jieba import set_dictionary [as 別名]
def set_default_dict(tokenizer, path_default_dict):
    """Install *path_default_dict* as the tokenizer's main dictionary.

    Logs the path, calls ``tokenizer.set_dictionary`` on it, and hands the
    same tokenizer back so the call can be chained.
    """
    print("Setting Jieba Default Dictionary at " + str(path_default_dict))
    tokenizer.set_dictionary(path_default_dict)
    return tokenizer
開發者ID:crownpku,項目名稱:Rasa_NLU_Chi,代碼行數:7,代碼來源:jieba_tokenizer.py


注:本文中的jieba.set_dictionary方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。