當前位置: 首頁>>代碼示例>>Python>>正文


Python re.L屬性代碼示例

本文整理匯總了Python中re.L屬性的典型用法代碼示例。如果您正苦於以下問題:Python re.L屬性的具體用法?Python re.L怎麽用?Python re.L使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在re的用法示例。


在下文中一共展示了re.L屬性的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: pynlp_build_key_word

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def pynlp_build_key_word(filename):  # build key words by term frequency
    """Count token frequencies in *filename* (one weibo per line) and return
    the top 20% most frequent tokens, minus stop words and whitespace chars.

    URLs are stripped from each line before pynlpir segmentation; tokens
    containing '@' (user mentions), tokens consisting only of ASCII word
    characters, and single-character tokens are ignored.
    """
    word_counts = {}
    # Fixed: the original r'http?://.+$' matched 'htt' plus an OPTIONAL 'p';
    # https?://\S+ matches http/https URLs up to the next whitespace.
    url_pattern = re.compile(r'https?://\S+')
    # re.A replaces re.L: LOCALE is rejected for str patterns in Python 3,
    # and both restrict \w to ASCII word characters here.
    ascii_word = re.compile(r'\w', re.A)
    with open(filename, encoding="utf-8") as fp:
        for line in fp:
            s = line
            for url in url_pattern.findall(line):
                s = s.replace(url, '')  # drop each URL
            tokens = pynlpir.segment(s, pos_tagging=False)  # word segmentation
            # The original removed '@' tokens from the list WHILE iterating it
            # (skipping the next element) and still counted the mention itself;
            # skipping via continue matches the stated intent.
            for token in tokens:
                if '@' in token:  # skip user mentions
                    continue
                stripped = ascii_word.sub("", token)
                if not stripped or stripped == ' ':  # empty / ASCII-only token
                    continue
                if len(token) > 1:  # single chars are mostly meaningless
                    word_counts[token] = word_counts.get(token, 0) + 1
    kw_list = sorted(word_counts, key=lambda w: word_counts[w], reverse=True)
    size = int(len(kw_list) * 0.2)  # keep the top 20% (comment previously said 30%)
    mood = set(kw_list[:size])
    return list(mood - set(stop) - set('\u200b') - set(' ') - set('\u3000')) 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:26,代碼來源:tool.py

示例2: loadDataSet

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def loadDataSet(path):  # return tokens and label for each weibo
    """Parse a labelled corpus at *path*: each line is a 2-char numeric label
    followed by the weibo text.  Returns (line_cut, label) where line_cut[i]
    is the token list for the line whose label is label[i].

    The label is appended only after tokenisation succeeds, so the two lists
    stay aligned; the original appended the label first and could
    desynchronise when a later statement raised.
    """
    line_cut = []
    label = []
    # Hoisted out of the loop.  re.A replaces re.L: LOCALE is rejected for
    # str patterns in Python 3; both restrict \w to ASCII word characters.
    ascii_word = re.compile(r'\w', re.A)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            temp = line.strip()
            try:
                sentence = temp[2:].lstrip()  # the weibo text
                first_label = int(temp[:2])  # leading two chars are the label
                word_list = []
                sentence = str(sentence).replace('\u200b', '')  # strip zero-width spaces
                for word in jieba.cut(sentence.strip()):
                    result = ascii_word.sub("", word)
                    if not result or result == ' ':  # empty / ASCII-only token
                        continue
                    word_list.append(word)
                word_list = list(set(word_list) - set(stop) - set('\u200b')
                                 - set(' ') - set('\u3000') - set('️'))
                line_cut.append(word_list)
                label.append(first_label)  # record only after full success
            except Exception:
                continue
    return line_cut, label  # tokens and label per weibo 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:25,代碼來源:Bayes.py

示例3: getmatch

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def getmatch(self, haystack):
        """Return a Match for *haystack* against self.pattern, or None.

        Non-string inputs never match.  self.flags, when set, is scanned for
        the letters i/l/m/s/u/x (either case) to build the re flag mask.
        When self.to is given, the returned Match carries the substituted
        string; otherwise it carries the haystack itself.
        """
        if not isinstance(haystack, basestring):
            return None
        flag_mask = 0
        if self.flags is not None:
            letter_to_bit = (("i", re.I), ("l", re.L), ("m", re.M),
                             ("s", re.S), ("u", re.U), ("x", re.X))
            for letter, bit in letter_to_bit:
                if letter in self.flags or letter.upper() in self.flags:
                    flag_mask |= bit
        if re.match(self.pattern, haystack, flags=flag_mask) is None:
            return None
        if self.to is None:
            return Match(haystack, haystack)
        return Match(haystack, re.sub(self.pattern, self.to, haystack, flags=flag_mask)) 
開發者ID:modelop,項目名稱:hadrian,代碼行數:25,代碼來源:tools.py

示例4: loadDataSet

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def loadDataSet(path):  # return tokens and label for each weibo
    """Parse a labelled corpus at *path* (2-char numeric label + text per
    line) and return (line_cut, label).

    Works on UTF-8 encoded bytes so re.L remains legal (LOCALE is only
    permitted with bytes patterns in Python 3).  The label is appended only
    after tokenisation succeeds so the two lists stay aligned.
    """
    line_cut = []
    label = []
    # rb'\w' is a raw bytes literal; the original b'\w' only worked because
    # the invalid '\w' escape was passed through verbatim, which raises a
    # warning on newer Pythons.  Hoisted out of the loops.
    locale_word = re.compile(rb'\w', re.L)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            temp = line.strip()
            try:
                sentence = temp[2:].lstrip()  # the weibo text
                first_label = int(temp[:2])  # leading two chars are the label
                word_list = []
                sentence = str(sentence).replace('\u200b', '')  # strip zero-width spaces
                for word in jieba.cut(sentence.strip()):
                    result = locale_word.sub(b"", bytes(word, encoding="utf-8")).decode("utf-8")
                    if not result or result == ' ':  # empty / word-char-only token
                        continue
                    word_list.append(word)
                word_list = list(set(word_list) - set(stop) - set('\u200b')
                                 - set(' ') - set('\u3000') - set('️'))
                line_cut.append(word_list)
                label.append(first_label)  # record only after full success
            except Exception:
                continue
    return line_cut, label  # tokens and label per weibo 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:25,代碼來源:Bayes.py

示例5: preprocessing

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def preprocessing(content):
    remove_punc = ('。 ; 。 、 」 「 , ( ) —').split(' ')
    ## preprocessing #1 : remove XXenglishXX and numbers
    preprocessing_1 = re.compile(r'\d*',re.L)  ## only substitute numbers
    #preprocessing_1 = re.compile(r'\w*',re.L)  ## substitute number & English
    content = preprocessing_1.sub("",content)
    ## preprocessing #2 : remove punctuation
    preprocessing_2 = re.compile('[%s]' % re.escape(string.punctuation))
    content = preprocessing_2.sub("",content)
    ## preprocessing #3 : remove Chinese punctuation and multiple whitspaces
    content = content.replace('\n','')
    for punc in remove_punc:
        content = content.replace(punc,'')
    try:
        content = parsing.strip_multiple_whitespaces(content)
    except:
        print 'Warning : failed to strip whitespaces @ '   
    
    return content 
開發者ID:easonchan1213,項目名稱:LDA_RecEngine,代碼行數:21,代碼來源:utils.py

示例6: _branch_flags

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def _branch_flags(flags):
        """Translate an iterable of single-letter flag characters into a
        combined ``re`` flag bitmask.

        Uses ``|=`` rather than the original ``+=``: adding bit values means
        a repeated letter (e.g. "ii") added the same bit twice and produced
        a corrupted mask.  Unknown letters are ignored, as before.
        """
        letter_to_flag = {
            "i": re.I,
            "L": re.L,
            "m": re.M,
            "s": re.S,
            "u": re.U,
            "x": re.X,
        }
        flagsbyte = 0
        for flag in flags:
            flagsbyte |= letter_to_flag.get(flag, 0)
        return flagsbyte 
開發者ID:scikit-hep,項目名稱:uproot,代碼行數:18,代碼來源:tree.py

示例7: build_key_word

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def build_key_word(path):  # produce key words by term frequency
    """Count token frequencies across *path* and return the top 15% most
    frequent multi-character tokens, minus stop words."""
    d = {}
    # Hoisted out of the loops.  re.A replaces re.L: LOCALE is rejected for
    # str patterns in Python 3; both restrict \w to ASCII word characters.
    ascii_word = re.compile(r'\w', re.A)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            for word in jieba.cut(line.strip()):
                result = ascii_word.sub("", word)
                if not result or result == ' ':  # empty / ASCII-only token
                    continue
                if len(word) > 1:  # single chars are mostly meaningless
                    d[word] = d.get(word, 0) + 1
    kw_list = sorted(d, key=lambda w: d[w], reverse=True)
    size = int(len(kw_list) * 0.15)  # keep the top 15% (comment previously said 30%)
    mood = set(kw_list[:size])
    return list(mood - set(stop)) 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:17,代碼來源:tool.py

示例8: get_word_feature

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def get_word_feature(sentence):
    """Segment *sentence* with jieba and return its distinct tokens, minus
    stop words, spaces, and tokens consisting only of ASCII word chars."""
    wordlist = []
    sentence = str(sentence).replace('\u200b', '')  # strip zero-width spaces
    # Hoisted out of the loop.  re.A replaces re.L: LOCALE is rejected for
    # str patterns in Python 3; both restrict \w to ASCII word characters.
    ascii_word = re.compile(r'\w', re.A)
    for word in jieba.cut(sentence.strip()):
        result = ascii_word.sub("", word)
        if not result or result == ' ':  # empty / ASCII-only token
            continue
        wordlist.append(word)
    return list(set(wordlist) - set(stop) - set(' ')) 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:12,代碼來源:tool.py

示例9: build_key_word

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def build_key_word(path):  # produce features by term frequency
    """Count token frequencies across *path* and return the top 20% most
    frequent multi-character tokens, minus stop words."""
    d = {}
    # Hoisted out of the loops.  re.A replaces re.L: LOCALE is rejected for
    # str patterns in Python 3; both restrict \w to ASCII word characters.
    ascii_word = re.compile(r'\w', re.A)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            for word in jieba.cut(line.strip()):
                result = ascii_word.sub("", word)
                if not result or result == ' ':  # empty / ASCII-only token
                    continue
                if len(word) > 1:  # single chars are mostly meaningless
                    d[word] = d.get(word, 0) + 1
    kw_list = sorted(d, key=lambda w: d[w], reverse=True)
    size = int(len(kw_list) * 0.2)  # keep the top 20% (comment previously said 30%)
    mood = set(kw_list[:size])
    return list(mood - set(stop)) 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:17,代碼來源:Bayes.py

示例10: loadDataSet

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def loadDataSet(path):  # return tokens and label for each weibo
    """Parse a labelled corpus at *path* (2-char numeric label + text per
    line), skipping lines labelled 3; return (line_cut, label).

    The label is appended only after tokenisation succeeds so the two lists
    cannot drift apart when a malformed line raises mid-way (the original
    appended the label before tokenising).
    """
    line_cut = []
    label = []
    # Hoisted out of the loops.  re.A replaces re.L: LOCALE is rejected for
    # str patterns in Python 3; both restrict \w to ASCII word characters.
    ascii_word = re.compile(r'\w', re.A)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            temp = line.strip()
            try:
                sentence = temp[2:].lstrip()  # the weibo text
                first_label = int(temp[:2])
                if first_label == 3:  # class 3 excluded from this two-class set
                    continue
                word_list = []
                sentence = str(sentence).replace('\u200b', '')  # strip zero-width spaces
                for word in jieba.cut(sentence.strip()):
                    result = ascii_word.sub("", word)
                    if not result or result == ' ':  # empty / ASCII-only token
                        continue
                    word_list.append(word)
                word_list = list(set(word_list) - set(stop) - set('\u200b')
                                 - set(' ') - set('\u3000') - set('️'))
                line_cut.append(word_list)
                label.append(first_label)  # record only after full success
            except Exception:
                continue
    return line_cut, label  # tokens and label per weibo 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:28,代碼來源:two_nb.py

示例11: build_key_word

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def build_key_word(path):  # produce features by term frequency
    """Count token frequencies across *path* and return the top 20% most
    frequent multi-character tokens, minus stop words.

    Works on UTF-8 encoded bytes so re.L remains legal (LOCALE is only
    permitted with bytes patterns in Python 3).
    """
    d = {}
    # rb'\w' is a raw bytes literal; the original b'\w' only worked because
    # the invalid '\w' escape was passed through verbatim, which raises a
    # warning on newer Pythons.  Hoisted out of the loops.
    locale_word = re.compile(rb'\w', re.L)
    with open(path, encoding="utf-8") as fp:
        for line in fp:
            for word in jieba.cut(line.strip()):
                result = locale_word.sub(b"", bytes(word, encoding="utf-8")).decode("utf-8")
                if not result or result == ' ':  # empty / word-char-only token
                    continue
                if len(word) > 1:  # single chars are mostly meaningless
                    d[word] = d.get(word, 0) + 1
    kw_list = sorted(d, key=lambda w: d[w], reverse=True)
    size = int(len(kw_list) * 0.2)  # keep the top 20% (comment previously said 30%)
    mood = set(kw_list[:size])
    return list(mood - set(stop)) 
開發者ID:Zephery,項目名稱:weiboanalysis,代碼行數:17,代碼來源:Bayes.py

示例12: test_constants

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def test_constants(self):
        """Each single-letter re flag alias must equal its long-form constant."""
        alias_pairs = [
            (re.I, re.IGNORECASE),
            (re.L, re.LOCALE),
            (re.M, re.MULTILINE),
            (re.S, re.DOTALL),
            (re.X, re.VERBOSE),
        ]
        for short_form, long_form in alias_pairs:
            self.assertEqual(short_form, long_form) 
開發者ID:war-and-code,項目名稱:jawfish,代碼行數:8,代碼來源:test_re.py

示例13: test_flags

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def test_flags(self):
        """Compiling with each individual flag must yield a pattern object."""
        single_flags = (re.I, re.M, re.X, re.S, re.L)
        for single_flag in single_flags:
            compiled = re.compile('^pattern$', single_flag)
            self.assertNotEqual(compiled, None) 
開發者ID:war-and-code,項目名稱:jawfish,代碼行數:5,代碼來源:test_re.py

示例14: testParseErrors

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def testParseErrors(self):
        """AllStrings must reject patterns using the I, U, or L flags."""
        for unsupported_flag in (re.I, re.U, re.L):
            self.assertRaises(sre_yield.ParseError,
                              sre_yield.AllStrings, 'a', unsupported_flag) 
開發者ID:girishramnani,項目名稱:hacking-tools,代碼行數:6,代碼來源:test_sre_yield.py

示例15: test_flags

# 需要導入模塊: import re [as 別名]
# 或者: from re import L [as 別名]
def test_flags(self):
        """Each standalone flag must produce a truthy compiled pattern."""
        lone_flags = [re.I, re.M, re.X, re.S, re.L]
        for lone_flag in lone_flags:
            pattern_obj = re.compile('^pattern$', lone_flag)
            self.assertTrue(pattern_obj) 
開發者ID:IronLanguages,項目名稱:ironpython2,代碼行數:5,代碼來源:test_re.py


注:本文中的re.L屬性示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。