This page collects typical usage examples of the Python method textblob.TextBlob. If you have been wondering what textblob.TextBlob does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore the other usage examples for the textblob module.
Fifteen code examples of textblob.TextBlob are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: get_sentiment_analysis
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def get_sentiment_analysis(text, coins):
    """Return the sentiment analysis of coins mentioned in text in
    the form of a dictionary that aggregates the sentiment of
    sentences that include each of the coins.
    """
    sentiment = {}
    blob = TextBlob(text)
    for sentence in blob.sentences:
        lowercase_words = [x.lower() for x in sentence.words]
        for coin in coins:
            if coin[0].lower() in lowercase_words or coin[1].lower() in lowercase_words:
                try:
                    sentiment[coin] += sentence.sentiment.polarity
                except KeyError:  # first sentence mentioning this coin
                    sentiment[coin] = sentence.sentiment.polarity
    return sentiment, blob.sentiment.polarity
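A minimal usage sketch; the shape of coins is an assumption inferred from the coin[0]/coin[1] lookups above, e.g. (symbol, name) tuples:

# Hypothetical call; each coin is assumed to be a (symbol, name) tuple.
coins = [("BTC", "Bitcoin"), ("ETH", "Ethereum")]
per_coin, overall_polarity = get_sentiment_analysis(
    "Bitcoin rallied hard today. Ethereum looks weak.", coins)
# per_coin maps each mentioned coin tuple to its summed sentence polarity;
# overall_polarity is the polarity of the whole text, in [-1.0, 1.0].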
Example 2: create_data
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
from typing import List, Union

import datatable as dt
import numpy as np
import pandas as pd


def create_data(X: dt.Frame = None) -> Union[str, List[str],
                                              dt.Frame, List[dt.Frame],
                                              np.ndarray, List[np.ndarray],
                                              pd.DataFrame, List[pd.DataFrame]]:
    # exit gracefully if method is called as a data upload rather than data modify
    if X is None:
        return []
    import os
    from h2oaicore.systemutils import config
    from textblob import TextBlob
    X = dt.Frame(X).to_pandas()
    # text_colnames and output_dataset_name are recipe-level settings defined elsewhere
    for text_colname in text_colnames:
        # fill missing values before casting to str so the "NA" placeholder is applied
        X["sentiment_dai_" + text_colname] = X[text_colname].fillna("NA").astype(str).apply(
            lambda x: TextBlob(x).sentiment[0])  # sentiment[0] is the polarity score
    temp_path = os.path.join(config.data_directory, config.contrib_relative_directory)
    os.makedirs(temp_path, exist_ok=True)
    # Save files to disk
    file_train = os.path.join(temp_path, output_dataset_name + ".csv")
    X.to_csv(file_train, index=False)
    return [file_train]
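Outside of a Driverless AI recipe, the core transformation reduces to a plain pandas apply. A minimal standalone sketch (the column name is a made-up example):

import pandas as pd
from textblob import TextBlob

df = pd.DataFrame({"review": ["great product", "terrible support", None]})
df["sentiment_dai_review"] = df["review"].fillna("NA").astype(str).apply(
    lambda x: TextBlob(x).sentiment[0])  # polarity in [-1.0, 1.0]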
Example 3: stars_in_review
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def stars_in_review(x, low_rating_strs, high_rating_strs):
    if not isinstance(x.review_text, str):
        return ABSTAIN
    for low_rating_str in low_rating_strs:
        if low_rating_str in x.review_text.lower():
            return NEGATIVE
    for high_rating_str in high_rating_strs:
        if high_rating_str in x.review_text.lower():
            return POSITIVE
    return ABSTAIN
# %% [markdown]
# We can also run [TextBlob](https://textblob.readthedocs.io/en/dev/index.html), a tool that provides a pretrained sentiment analyzer, on the reviews, and use its polarity and subjectivity scores to estimate the user's rating for the book.
# As usual, these thresholds were picked by analyzing the score distributions and running error analysis.
# %%
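A sketch of the TextBlob-based labeling function that the note above describes; the 0.3 polarity and 0.5 subjectivity cutoffs below are illustrative assumptions, not the tuned thresholds the text refers to:

from textblob import TextBlob

def textblob_positive(x):
    # Label a review POSITIVE only when TextBlob finds it both positive
    # and reasonably subjective; otherwise abstain. Thresholds are illustrative.
    scores = TextBlob(x.review_text).sentiment
    if scores.polarity > 0.3 and scores.subjectivity > 0.5:
        return POSITIVE
    return ABSTAIN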
Example 4: analize_sentiment
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def analize_sentiment(tweet):
    '''
    Utility function to classify the polarity of a tweet
    using textblob.
    '''
    try:
        analysis = textblob.TextBlob(data_preparation(tweet))
    except Exception:
        analysis = textblob.TextBlob(tweet)
    # note: int() truncates subjectivity (a float in [0, 1]) to 0 unless it is exactly 1.0
    if analysis.sentiment.polarity > 0:
        return 1, int(analysis.sentiment.subjectivity)
    elif analysis.sentiment.polarity == 0:
        return 0, int(analysis.sentiment.subjectivity)
    else:
        return -1, int(analysis.sentiment.subjectivity)
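A hypothetical call (data_preparation is assumed to be a tweet-cleaning helper defined elsewhere in the script):

label, subjectivity = analize_sentiment("This coin is going to the moon!")
# label is 1, 0, or -1; because of the int() truncation, subjectivity
# comes back as 0 for any score below 1.0.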
Example 5: _translate_message
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def _translate_message(bot, broadcast_list, context):
    if context and "autotranslate" in context:
        _autotranslate = context["autotranslate"]
        origin_language = _get_room_language(bot, _autotranslate["conv_id"])
        for send in broadcast_list:
            target_conversation_id = send[0]
            response = send[1]
            target_language = _get_room_language(bot, target_conversation_id)
            if origin_language != target_language:
                logger.debug("translating {} to {}".format(origin_language, target_language))
                translated = _autotranslate["event_text"]
                try:
                    en_blob = TextBlob(_autotranslate["event_text"])
                    translated = "{0}".format(en_blob.translate(to=target_language))
                    # translated = gs.translate(_autotranslate["event_text"], target_language)
                except Exception:
                    logger.debug("Translation API returned string unchanged")
                finally:
                    if _autotranslate["event_text"] != translated:
                        # mutate the original response by reference
                        response.extend([
                            hangups.ChatMessageSegment('\n', hangups.SegmentType.LINE_BREAK),
                            hangups.ChatMessageSegment('(' + translated + ')')])
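The translate call in isolation, as a minimal sketch. Note that TextBlob deprecated translate() and detect_language() in newer releases in favor of dedicated translation libraries, so this assumes an older TextBlob:

from textblob import TextBlob

blob = TextBlob("Simple is better than complex.")
print(blob.translate(to="es"))  # e.g. "Lo simple es mejor que lo complejo."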
Example 6: translate
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
async def translate(self, ctx: Context, to_lang: str, *, text: str):
    """Translate to another language.

    Example:
        !translate es Simple is better than complex.
        will translate the sentence to Spanish.
        !translatelang
        will list all the supported languages
    """
    out = await self._translate(text, dest=to_lang)
    await self.bot.say(out)
    # blob = TextBlob(text)
    # out = blob.translate(to=to_lang)
    # await self.bot.say(out)
Example 7: build_dict
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def build_dict(params):
    story_line = json.load(open(params['input_json'], 'r'))
    wtoi = story_line['words2id']
    count = 0
    refs_words = []
    for stories in story_line['album2stories'][params['split']].values():
        ref_words = []
        for story_id in stories:
            txt = story_line[params['split']][story_id]['origin_text']
            tmp_tokens = TextBlob(txt).tokens + ['<EOS>']
            tmp_tokens = [_ if _ in wtoi else '<UNK>' for _ in tmp_tokens]
            ref_words.append(' '.join(tmp_tokens))
        refs_words.append(ref_words)
        count += 1
    print('total albums: ', count)
    ngram_words = compute_doc_freq(create_crefs(refs_words))
    return ngram_words, count
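For reference, TextBlob's .tokens property keeps punctuation as separate tokens, which is why the '<EOS>' marker is appended afterwards. Illustrative behavior:

from textblob import TextBlob

TextBlob("A dog ran fast.").tokens
# WordList(['A', 'dog', 'ran', 'fast', '.'])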
Example 8: find_noun_phrases
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def find_noun_phrases(string):
    noun_counts = {}
    try:
        blob = TextBlob(string.decode('utf-8'))  # Python 2: decode bytes to unicode
    except Exception:
        print("Error occurred")
        return None
    if blob.detect_language() != "en":
        print("Tweets are not in English")
        sys.exit(1)
    else:
        for noun in blob.noun_phrases:
            if noun in stopwords.words('english') or noun in extra_stopwords or noun == '' or len(noun) < 3:
                pass
            else:
                noun_counts[noun.lower()] = blob.words.count(noun)
        sorted_noun_counts = sorted(noun_counts.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_noun_counts[0:15]
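noun_phrases in isolation, using the example from TextBlob's own documentation (the output is illustrative; results vary with the installed extractor and corpora):

from textblob import TextBlob

TextBlob("Python is a high-level, general-purpose programming language.").noun_phrases
# WordList(['python'])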
Example 9: generate_html
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def generate_html(paragraphs, title_text):
    doc = dominate.document(title='Summary: {}'.format(title_text))
    with doc.head:
        style("""\
            body {
                background-color: #F9F8F1;
                color: #2C232A;
                font-family: sans-serif;
                font-size: 1.2em;
            }
        """)
    with doc:
        div(id='header').add(h1(title_text))
        with div():
            attr(cls='body')
            for para in paragraphs:
                tb = TextBlob(para)
                with p():
                    for sentence in tb.sentences:
                        span(sentence, style="color: {}".format(get_polarity_color(sentence.polarity)))
    return doc
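get_polarity_color is a helper defined elsewhere in that script; one plausible sketch of it (the colors and cutoffs are invented for illustration):

def get_polarity_color(polarity):
    # map sentence polarity to a CSS color: green-ish for positive,
    # red-ish for negative, the body text color otherwise
    if polarity > 0.1:
        return "#2E7D32"
    if polarity < -0.1:
        return "#C62828"
    return "#2C232A"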
Example 10: read_line_eos_noums
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def read_line_eos_noums(self, path):
    """
    Generator. Similar to the read_line_eos function from
    the text_mani module; the only difference here
    is that we also keep track of all the nouns.

    :type path: str
    """
    for line in open(path):
        if len(list(self.all_noums)) <= self.max_noums:
            blob = TextBlob(line)
            noums = set(blob.noun_phrases)
            self.all_noums = self.all_noums.union(noums)
        for word in line.split():
            yield word
        yield '<eos>'
Example 11: __init__
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def __init__(self, raw_text=None, text_title=None):
    try:
        # props for internal use
        self._raw_text = raw_text
        self._text_title = text_title
        # props to store data
        self._summary = str()
        self._keywords = set()
        self._iocs = dict()
        self._tlp = None
        self._debug = dict({'iocs': dict(), 'keywords': dict()})
        if self._raw_text is not None:
            if not isinstance(self._raw_text, unicode):  # Python 2 code
                self._raw_text = self._raw_text.decode('utf8')
            self._tlpfilter = TLPFilter()
            self._clean_text = self._tlpfilter.text(self._raw_text)
            self._blob = TextBlob(self._raw_text)
            self._clean_blob = TextBlob(self._clean_text)
    except Exception:
        import traceback
        traceback.print_exc()
Example 12: iter_filth
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def iter_filth(self, text):
    if not isinstance(self.disallowed_nouns, CanonicalStringSet):
        raise TypeError(
            'NameDetector.disallowed_nouns must be CanonicalStringSet'
        )
    # find the set of proper nouns using textblob.
    proper_nouns = set()
    blob = textblob.TextBlob(text)
    for word, part_of_speech in blob.tags:
        is_proper_noun = part_of_speech in ("NNP", "NNPS")
        if is_proper_noun and word.lower() not in self.disallowed_nouns:
            proper_nouns.add(word)
    # use a regex to replace the proper nouns by first escaping any
    # lingering punctuation in the regex
    # http://stackoverflow.com/a/4202559/564709
    if proper_nouns:
        re_list = []
        for proper_noun in proper_nouns:
            re_list.append(r'\b' + re.escape(str(proper_noun)) + r'\b')
        self.filth_cls.regex = re.compile('|'.join(re_list))
    else:
        self.filth_cls.regex = None
    return super(NameDetector, self).iter_filth(text)
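blob.tags in isolation; NNP and NNPS are the Penn Treebank tags for singular and plural proper nouns (the output is illustrative):

import textblob

textblob.TextBlob("Alice met Bob in Paris.").tags
# [('Alice', 'NNP'), ('met', 'VBD'), ('Bob', 'NNP'), ('in', 'IN'), ('Paris', 'NNP')]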
Example 13: SentimentAnalysis
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def SentimentAnalysis(_arg1, library="nltk"):
    """
    Sentiment Analysis is a procedure that assigns a score from -1 to 1
    for a piece of text, with -1 being negative and 1 being positive. For
    more information on the function and how to use it, please refer to
    tabpy-tools.md
    """
    if not isinstance(_arg1[0], str):
        raise TypeError
    supportedLibraries = {"nltk", "textblob"}
    library = library.lower()
    if library not in supportedLibraries:
        raise ValueError
    scores = []
    if library == "nltk":
        sid = SentimentIntensityAnalyzer()
        for text in _arg1:
            sentimentResults = sid.polarity_scores(text)
            score = sentimentResults["compound"]
            scores.append(score)
    elif library == "textblob":
        for text in _arg1:
            currScore = TextBlob(text)
            scores.append(currScore.sentiment.polarity)
    return scores
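A hypothetical call, mirroring how TabPy would pass a column of strings (exact scores depend on the library's lexicon or model):

SentimentAnalysis(["I love this.", "This is awful."], library="textblob")
# -> approximately [0.5, -1.0]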
Example 14: analyze_sentiment
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def analyze_sentiment(self, tweet):
    analysis = TextBlob(self.clean_tweet(tweet))
    if analysis.sentiment.polarity > 0:
        return 1
    elif analysis.sentiment.polarity == 0:
        return 0
    else:
        return -1
Example 15: noun_phrases
# Required module: import textblob [as alias]
# Or: from textblob import TextBlob [as alias]
def noun_phrases(text):
    blob = TextBlob(text)
    # note: despite its name, this returns the blob's tokens, not blob.noun_phrases
    return blob.tokenize()
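For contrast, tokenize() and noun_phrases on the same blob (the output is illustrative):

from textblob import TextBlob

blob = TextBlob("Natural language processing is fun.")
blob.tokenize()    # WordList(['Natural', 'language', 'processing', 'is', 'fun', '.'])
blob.noun_phrases  # WordList(['natural language processing'])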