

Python snownlp.SnowNLP Method Code Examples

This article collects typical usage examples of the snownlp.SnowNLP method in Python. If you are wondering how snownlp.SnowNLP is used in practice, or what it is good for, the curated examples below may help. You can also explore further usage examples from the snownlp package.


The following presents 10 code examples of the snownlp.SnowNLP method, sorted by popularity by default.
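
Before diving into the examples, here is a minimal usage sketch of the three SnowNLP attributes exercised in this article (sentiments, words, and han); the sample sentence is an arbitrary placeholder:

from snownlp import SnowNLP

s = SnowNLP(u'這部電影真的很好看')   # arbitrary sample sentence
print(s.sentiments)  # sentiment score in [0, 1]; closer to 1 means more positive
print(s.words)       # word segmentation, returned as a list of tokens
print(s.han)         # traditional Chinese converted to simplified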

Example 1: post

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def post(self):
    args = parser.parse_args()
    if args['type'] == 'hotel_tfidf':
        # .astype(str): np.str is deprecated and was removed in NumPy 1.24
        result = hotel_tfidf_clf.predict([args['sentence']]).astype(str)
        return result[0], 200
    elif args['type'] == 'douban_wb':
        result = douban_wb_clf.predict([args['sentence']]).astype(str)
        return result[0], 200
    # elif args['type'] == 'restaurant':
    #     result = restaurant_clf.predict([args['sentence']]).astype(str)
    #     return result[0], 200
    elif args['type'] == 'douban_snowNLP':
        # SnowNLPNew is presumably the project's retrained SnowNLP variant
        s = SnowNLPNew(args['sentence'])
        return s.sentiments * 5, 200  # scale [0, 1] to [0, 5]
    else:
        s = SnowNLP(args['sentence'])
        return s.sentiments * 5, 200
Developer: bighuang624, Project: sentiment-analysis-webapp, Lines: 19, Source: app.py
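
For context, a client call against this handler might look like the sketch below; the endpoint URL and the use of the requests library are assumptions for illustration, not taken from the project:

import requests

# Hypothetical endpoint; the route is not shown in the excerpt above.
resp = requests.post('http://localhost:5000/predict',
                     data={'type': 'douban_snowNLP', 'sentence': u'這部電影真好看'})
print(resp.text)  # a sentiment score scaled to [0, 5]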

Example 2: output

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
import time
import random

def output(poem):
    '''Pretty-print the poem and report its sentiment score.'''
    poem = poem.strip('/')
    length = len(poem.split('/')[0]) * 2 + 8
    print()
    print('-' * length)
    print()
    for i in poem.split('/'):
        print('     ' + i)
    print()
    print('-' * length)
    print()
    snow = SnowNLP(poem)
    print("Sentiment: {}".format(snow.sentiments))
    print()
    print()
    time.sleep(random.random() * 2)
Developer: ZubinGou, Project: AI_Poet_Totoro, Lines: 19, Source: poetize_plus.py
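
Called with a slash-separated poem (here Li Bai's well-known quatrain as an arbitrary sample), the function prints each line framed by rules, then the overall sentiment:

output(u'床前明月光/疑是地上霜/舉頭望明月/低頭思故鄉')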

Example 3: emotionParser

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def emotionParser(*names):
    conn = sqlite3.connect("deal_data.db")
    conn.text_factory = str
    cursor = conn.cursor()
    # Build "content like '%a%' or content like '%b%'" for every name.
    # Caution: string-built SQL is open to injection; only use with trusted input.
    likeStr = " or ".join('content like "%{}%"'.format(name) for name in names)
    print(likeStr)
    cursor.execute("select content from realData where " + likeStr)
    values = cursor.fetchall()
    sentimentslist = []
    for item in values:
        # With text_factory = str, rows arrive as text; no .decode() needed on Python 3.
        sentimentslist.append(SnowNLP(item[0]).sentiments)
    plt.hist(sentimentslist, bins=np.arange(0, 1, 0.01), facecolor="#4F8CD6")
    plt.xlabel("Sentiments Probability")
    plt.ylabel("Quantity")
    plt.title("Analysis of Sentiments for Lidan")
    plt.show()
    cursor.close()
    conn.close()
Developer: Pinned, Project: ICanIBBData, Lines: 25, Source: LiDanEmotionParser.py
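
A safer variant, sketched here as a suggestion rather than the project's own code, binds the patterns as sqlite3 parameters instead of interpolating them into the SQL string:

placeholders = " or ".join(["content like ?"] * len(names))
cursor.execute("select content from realData where " + placeholders,
               tuple("%{}%".format(n) for n in names))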

Example 4: wallstr_news

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def wallstr_news():
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
    headers = {'User-Agent': user_agent}
    url = 'https://wallstreetcn.com/live/blockchain'
    raw_page = requests.get(url, headers=headers)
    page = bs(raw_page.text, 'html.parser')  # bs is BeautifulSoup
    blockchain_news = page.find_all('div', class_="wscn-tab-pane")[1]
    big_news = blockchain_news.find_all('div', class_='live-item score-2')
    normal_news = blockchain_news.find_all('div', class_='live-item score-1')
    s = 0
    count = 0
    # Important items count twice as much as normal items in the weighted mean.
    for i in big_news:
        text = i.find('div', class_='content-html').get_text()
        sentiment = SnowNLP(text).sentiments
        s = s + 2 * sentiment
        count = count + 2
    for i in normal_news:
        text = i.find('div', class_='content-html').get_text()
        sentiment = SnowNLP(text).sentiments
        s = s + sentiment
        count = count + 1
    # Assumes at least one item was scraped; otherwise this divides by zero.
    total_sentiment = s / count
    return total_sentiment
Developer: benjaminshi02003220, Project: Smart-Bitcoin-Auto-Trading-Bot-based-on-Nerual-Networks, Lines: 27, Source: wallstreet_news.py

Example 5: cal_sentiment

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def cal_sentiment(text):
    """
    Calculate the sentiment value of a sentence, powered by SnowNLP.
    :param text: the input sentence (a unicode string)
    :return: a sentiment score in [0, 1]; values near 1 are positive
    """
    s = SnowNLP(text)

    return s.sentiments
Developer: lucasxlu, Project: LagouJob, Lines: 11, Source: sentiment.py
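
A quick usage sketch (the review sentence is an arbitrary sample):

score = cal_sentiment(u'服務態度很好,房間也很乾淨')  # "great service, clean rooms"
print(score)  # should print a value near 1.0 for clearly positive text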

Example 6: zh_simplify_v2

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def zh_simplify_v2(line, server_model):
    # SnowNLP(...).han converts traditional Chinese characters to simplified.
    # server_model is unused here; it is kept to match the caller's interface.
    return SnowNLP(line).han
Developer: OpenNMT, Project: OpenNMT-py, Lines: 4, Source: process_zh.py

Example 7: zh_simplify_v2

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def zh_simplify_v2(line):
    # Same traditional-to-simplified conversion as Example 6, without the extra argument.
    return SnowNLP(line).han
Developer: memray, Project: OpenNMT-kpg-release, Lines: 4, Source: process_zh.py
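
A one-line demonstration of the conversion (sample string arbitrary):

print(SnowNLP(u'自然語言處理').han)  # -> 自然语言处理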

Example 8: sentiments_analysis

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def sentiments_analysis():
    from snownlp import SnowNLP
    # Read the comments.
    df = pd.read_csv('comment.csv', sep=';', header=None)
    # Score each comment's sentiment.
    sentiment = lambda x: SnowNLP(x).sentiments
    df[2] = df[1].apply(sentiment)
    # Optionally write the scores back to csv:
    # df.to_csv('comment_sentiments.csv', sep=';', index=False, header=False)
    # Tidy the data.
    df.columns = ['date', 'comment', 'sentiment']
    df['date'] = pd.to_datetime(df['date'])
    df = df.set_index('date')
    # Keep dates up to 2019-02-04.
    daily = df[:'2019-02-04']['sentiment']
    # Average the scores per day.
    daily_mean = daily.resample('D').mean()
    # Plotting: pandas recommends registering its datetime converters
    # before plotting with matplotlib.
    register_matplotlib_converters()

    # One way to render Chinese labels is pyplotz:
    # from pyplotz.pyplotz import PyplotZ
    # pltz = PyplotZ()
    # pltz.enable_chinese()
    # pltz.title("流浪地球評論分析")
    # pltz.xlabel("日期")
    # pltz.ylabel("評論數")

    # Here we instead point matplotlib at a Chinese font directly.
    font = FontProperties(fname='../font/PingFang.ttc')
    plt.title("流浪地球評論分析", fontproperties=font)  # comment analysis for "The Wandering Earth"
    plt.xlabel("日期", fontproperties=font)    # date
    plt.ylabel("好感度", fontproperties=font)  # favorability

    plt.plot(daily_mean)
    plt.axis("tight")
    # Show grid lines.
    plt.grid(True)
    # Auto-rotate the x-axis dates.
    plt.gcf().autofmt_xdate()
    # Annotate each point with its value.
    for a, b in zip(daily_mean.index, daily_mean.values):
        plt.text(a, b, str(round(b, 4)))
    # Save the figure.
    plt.savefig('comment_sentiment_analysis.png')
    # Display it.
    plt.show()
Developer: keejo125, Project: web_scraping_and_data_analysis, Lines: 51, Source: sentiment_analysis.py

Example 9: analyseSignature

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def analyseSignature(friends):
    signatures = ''
    emotions = []
    pattern = re.compile("1f\d.+")  # matches emoji escape codes such as "1f60a..."
    for friend in friends:
        signature = friend['Signature']
        if signature is not None:
            signature = signature.strip().replace('span', '').replace('class', '').replace('emoji', '')
            signature = pattern.sub('', signature)
            if len(signature) > 0:
                nlp = SnowNLP(signature)
                emotions.append(nlp.sentiments)
                signatures += ' '.join(jieba.analyse.extract_tags(signature, 5))
    with open('signatures.txt', 'wt', encoding='utf-8') as file:
        file.write(signatures)

    # Signature WordCloud
    back_coloring = np.array(Image.open('flower.jpg'))
    wordcloud = WordCloud(
        font_path='simfang.ttf',
        background_color="white",
        max_words=1200,
        mask=back_coloring,
        max_font_size=75,
        random_state=45,
        width=960,
        height=720,
        margin=15
    )

    wordcloud.generate(signatures)
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()
    wordcloud.to_file('signatures.jpg')

    # Signature Emotional Judgment
    count_good = len(list(filter(lambda x: x > 0.66, emotions)))
    count_normal = len(list(filter(lambda x: 0.33 <= x <= 0.66, emotions)))
    count_bad = len(list(filter(lambda x: x < 0.33, emotions)))
    print(count_good * 100 / len(emotions))
    print(count_normal * 100 / len(emotions))
    print(count_bad * 100 / len(emotions))
    labels = [u'負面消極', u'中性', u'正面積極']  # negative / neutral / positive
    values = (count_bad, count_normal, count_good)
    plt.rcParams['font.sans-serif'] = ['simHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.xlabel(u'情感判斷')  # sentiment judgment
    plt.ylabel(u'頻數')      # frequency
    plt.xticks(range(3), labels)
    plt.bar(range(3), values, color=['r', 'g', 'b'])  # one color per bar: red, green, blue
    plt.title(u'%s的微信好友簽名信息情感分析' % friends[0]['NickName'])  # sentiment analysis of the user's friends' signatures
    plt.show()
# login wechat and extract friends 
Developer: qinyuanpei, Project: wechat-analyse, Lines: 58, Source: main.py

Example 10: __call__

# Required module: import snownlp [as alias]
# Or: from snownlp import SnowNLP [as alias]
def __call__(self, value, positions=False, chars=False, keeporiginal=False,
             removestops=True, start_pos=0, start_char=0, tokenize=True,
             mode='', **kwargs):
    """
    :param value: The unicode string to tokenize.
    :param positions: Whether to record token positions in the token.
    :param chars: Whether to record character offsets in the token.
    :param start_pos: The position number of the first token. For example,
        if you set start_pos=2, the tokens will be numbered 2,3,4,...
        instead of 0,1,2,...
    :param start_char: The offset of the first character of the first
        token. For example, if you set start_char=2, the text "aaa bbb"
        will have chars (2,5),(6,9) instead of (0,3),(4,7).
    :param tokenize: if True, the text should be tokenized.
    """
    assert isinstance(value, text_type), "%r is not unicode" % value

    # Example input for manual testing:
    # fpath = '/Users/astorer/Dev/txtorg/examples/chinese/1.txt'
    # text = open(fpath).read()
    # value = unicode(text, encoding='utf-8')

    # SnowNLP performs the Chinese word segmentation. Thanks, isnowfy!
    s = SnowNLP(value)
    tokenlist = s.words

    t = Token(positions, chars, removestops=removestops, mode=mode,
              **kwargs)
    if not tokenize:
        t.original = t.text = value
        t.boost = 1.0
        if positions:
            t.pos = start_pos
        if chars:
            t.startchar = start_char
            t.endchar = start_char + len(value)
        yield t
    else:
        for (pos, text) in enumerate(tokenlist):
            # Locate the starting character of this token in the original
            # string (this may be off by one for repeated tokens).
            start_char_t = value[start_char:].find(text) + start_char
            t.text = text
            # print(pos, start_char_t, text)
            if positions:
                t.pos = start_pos + pos
            if chars:
                t.startchar = start_char_t
                t.endchar = start_char_t + len(text)
            yield t
        # Token construction mirrors whoosh's own tokenizers:
        # https://bitbucket.org/mchaput/whoosh/src/c9ad870378a0f5167182349b64fc3e09c6ca12df/src/whoosh/analysis/tokenizers.py?at=default
Developer: ChristopherLucas, Project: txtorg, Lines: 53, Source: chinese.py
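
For orientation, such a tokenizer is typically wired into a Whoosh schema as in the sketch below; ChineseTokenizer is a hypothetical name standing in for the txtorg class whose __call__ is shown above:

from whoosh.fields import Schema, TEXT

# ChineseTokenizer: hypothetical stand-in for the Tokenizer subclass above.
schema = Schema(content=TEXT(analyzer=ChineseTokenizer()))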


Note: The snownlp.SnowNLP examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.