This article collects typical usage examples of Python's pattern.web.Twitter class. If you have been wondering what the Twitter class does, how to use it, or what working examples look like, the hand-picked class examples below may help.
The following shows 15 code examples of the Twitter class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: Pattern_Module_Twitter_Stream
def Pattern_Module_Twitter_Stream():
    # Start the stopwatch.
    t1 = time.time()
    # Open a buffered stream of tweets containing '#Fail'.
    stream = Twitter().stream('#Fail')
    # Poll the stream 10 times.
    for second in range(10):
        # Pull any new stream data into the buffer.
        stream.update(bytes=1024)
        # Skip this round if nothing has arrived yet.
        if len(stream) == 0:
            continue
        # The stream buffers tweets; the newest one is at the end.
        tweet = stream[-1]
        # Print the tweet.
        print('Tweet: %s' % tweet.text)
        # Print its sentiment.
        print('Sentiment Analysis of Tweet: %s' % TextBlob(
            str(tweet.text), analyzer=NaiveBayesAnalyzer()).sentiment[0].upper())
        # Wait 3 seconds between queries - we do not want to get blocked.
        time.sleep(3)
    return time.time() - t1
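A minimal driver for this example might look like the sketch below; the imports are assumptions about the snippet's context, since they are not shown above:
import time
from pattern.web import Twitter
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer

elapsed = Pattern_Module_Twitter_Stream()
print('Streamed for %.1f seconds' % elapsed)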
Example 2: get_tweets
def get_tweets():
    '''This function parses Twitter to find tweets about a user-defined political figure.
    '''
    # Introduce the program to the user.
    print 'This program measures the average sentiment of the populace towards a political candidate through the analysis of recent tweets\n'
    print 'Enter the name of a candidate:'
    x = raw_input('> ')  # receive the name of the candidate to search for
    print 'Enter number of tweets to search (max = 100)'
    twtNumstr = raw_input('> ')  # receive the number of tweets to search for
    twtNum = int(twtNumstr)  # convert to int for use in the search
    # If an invalid number was entered, clamp it to the minimum or maximum allowed.
    if twtNum <= 1:
        twtNum = 2
        print 'Invalid number entered. The minimum of 2 tweets will be used.'
    elif twtNum > 100:
        twtNum = 100
        print 'Invalid number entered. The maximum of 100 tweets will be used.'
    t = Twitter()  # search for tweets containing the user-defined keyword
    twts = []
    for tweet in t.search(x, count=twtNum):
        twts.append(tweet.text)
    return twts
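The function only returns raw tweet text; the averaging step promised in the introduction could be sketched with pattern.en's sentiment() (an assumption, as any sentiment scorer would do):
from pattern.en import sentiment

twts = get_tweets()
polarities = [sentiment(t)[0] for t in twts]  # polarity lies in [-1.0, 1.0]
print('Average sentiment: %.3f' % (sum(polarities) / len(polarities)))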
Example 3: search
def search():
    query = str(raw_input("enter search query: "))
    t = Twitter()
    chances = 0
    fileSave.write(query + "\n")
    allChances = 0
    for tweet in t.search(query, start=None, count=5):
        print tweet.text
        # Calculate the tweet's sentiment.
        sent_int = sent(tweet.text)
        sent_str = str(sent_int)
        # Calculate the author's follower count.
        follows_int = findFollows(tweet.author)
        follows_str = str(follows_int)
        # Calculate the chances; accumulated below.
        chances = follows_int * sent_int
        print str(chances) + "\n"
        # Save to file.
        save = sent_str + "\n" + follows_str + "\n \n"
        fileSave.write(save)
        allChances = allChances + chances
    print "OVERALL: " + str(allChances)
Example 4: search_with_language_in_region
def search_with_language_in_region(lang, capital_city, search_terms, file_name):
    """
    Does a Twitter search in the specified language in the area of a given capital city,
    e.g. search_with_language_in_region('en', 'Paris', '#yoloswag', 'Paris_yoloswag')
    Inputs: expects strings for everything.
        lang: the language you want to search in, e.g. 'en'
        capital_city: the city you want to search around, located
            through pattern's geocode function, e.g. 'Paris'
        search_terms: the terms to search for, e.g. 'crimea putin'
        file_name: the file name you want to save the tweets under, e.g. 'nealiscool.pickle'
    Outputs: a dictionary of the tweets, pickled to disk under file_name.
        The keys of the dictionary are the unique tweet IDs.
    """
    t = Twitter(language=lang)
    tweets_gathered = {}
    i = None
    for j in range(2):
        for tweet in t.search(search_terms, start=i, count=10, geo=geocode(capital_city)[:2]):
            print tweet.text
            print
            i = tweet.id
            tweets_gathered[tweet.id] = tweet.text
    f = open(file_name, 'wb')
    pickle.dump(tweets_gathered, f)
    f.close()
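To read the gathered tweets back, unpickle the file; the 'Paris_yoloswag' file name below comes from the docstring's example call:
import pickle

f = open('Paris_yoloswag', 'rb')
tweets_gathered = pickle.load(f)
f.close()
for tweet_id, text in tweets_gathered.items():
    print('%s: %s' % (tweet_id, text))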
Example 5: gettweets
def gettweets(searchterms):
    tweetlist = []
    from pattern.web import Twitter, plaintext
    twitter = Twitter(language='en')
    for tweet in twitter.search(searchterms, cached=False):
        tweetlist.append(plaintext(tweet.text))
    return tweetlist
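For example (the hashtag is hypothetical):
for t in gettweets('#MachineLearning')[:5]:
    print(t)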
Example 6: get_pattern_data
def get_pattern_data(search_param):
    twitter = Twitter(language='en')
    for tweet in twitter.search(search_param, cached=True):
        print(plaintext(tweet.text).encode('ascii', 'ignore').decode('utf-8'))
    g = Graph()
    for i in range(10):
        for result in twitter.search(search_param, start=i+1, count=50):
            s = result.text.lower()
            s = plaintext(s)
            s = parsetree(s)
            p = '{NP} (VP) ' + search_param + ' {NP}'
            for m in search(p, s):
                x = m.group(1).string  # NP left
                y = m.group(2).string  # NP right
                if x not in g:
                    g.add_node(x)
                if y not in g:
                    g.add_node(y)
                g.add_edge(g[x], g[y], stroke=(0, 0, 0, 0.75))  # R,G,B,A
    # if len(g) > 0:
    #     g = g.split()[0]  # Largest subgraph.
    for n in g.sorted()[:40]:  # Sort by Node.weight.
        n.fill = (0, 0.5, 1, 0.75 * n.weight)
    g.export('data', directed=False, weighted=0.6)
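A hypothetical driver with the imports the function appears to rely on; pattern's Graph.export() writes an HTML canvas visualization into the named folder:
from pattern.web import Twitter, plaintext
from pattern.en import parsetree
from pattern.search import search
from pattern.graph import Graph

get_pattern_data('is better than')  # then open data/index.html in a browser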
Example 7: search_tweets
def search_tweets(self, celeb):
    '''
    Pull tweets from the Twitter API that mention
    the given celebrity.
    '''
    twitter_api = Twitter(language='en')
    # TODO: up the count for the final project
    return twitter_api.search(celeb, count=3000)
Example 8: getTweetsByCoord
def getTweetsByCoord(self, term, lat, lng):
    twitter = Twitter(language='en')
    tweets = []
    for tweet in twitter.search(term, geo=(lat, lng)):
        tweets.append(tweet.text)
    return tweets
Example 9: search
def search(text):
    results = []
    twitter = Twitter(language='en')
    for tweet in twitter.search(text, count=30, cached=False):
        results.append(tweet.text)
    return results
Example 10: TwitterStream
def TwitterStream():
    # Another way to mine Twitter is to set up a stream.
    # A Twitter stream maintains an open connection to Twitter
    # and waits for data to pour in.
    # Twitter.search() allows us to look at older tweets,
    # Twitter.stream() gives us the most recent tweets.
    for trend in Twitter().trends(cached=False):
        print trend
    # It might take a few seconds to set up the stream.
    stream = Twitter().stream("i love", timeout=30)
    pos_count = 0
    neg_count = 0
    for i in range(50):
        if neg_count:
            ratio = 100.0 * pos_count / neg_count
        else:
            ratio = 0
        print str(pos_count) + " " + str(neg_count) + " " + str(ratio) + "%"
        # Poll Twitter to see if there are new tweets.
        stream.update()
        # The stream is a list of buffered tweets so far,
        # with the latest tweet at the end of the list.
        for tweet in reversed(stream):
            print tweet.text
            print tweet.language
            sent = pol(tweet.text)
            if sent > 0:
                pos_count += 1
            else:
                neg_count += 1
        # Clear the buffer every so often.
        stream.clear()
        # Wait awhile between polls.
        time.sleep(1)
    print "Final Twitter"
    print pos_count
    print neg_count
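pol() is not defined in this snippet; a plausible stand-in based on pattern.en's sentiment() (an assumption, since the original helper is not shown):
from pattern.en import sentiment

def pol(text):
    # Hypothetical stand-in: polarity score in [-1.0, 1.0].
    return sentiment(text)[0]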
Example 11: search
def search(self, args):
    """
    Usage:
        search [-fty] <keyword>
        search -h | --help

    Options:
        -h --help      Show this help message.
        -f --facebook  Search for keyword on Facebook.
        -t --twitter   Search for keyword on Twitter.
        -y --youtube   Search for keyword on YouTube.
    """
    # Example args information:
    # {'--facebook': False,
    #  '--help': False,
    #  '--twitter': True,
    #  '--youtube': False,
    #  '<keyword>': 'f'}
    engine = Twitter(language='en')
    ret = []
    '''
    generator = ({
        'text': tweet.text,
        'author': tweet.author,
        'date': tweet.date,
        'hashtags': hashtags(tweet.text)
    } for tweet in engine.search('is cooler than', count=25, cached=False))
    self.db.bulk_insert('test', generator)
    '''
    for tweet in engine.search('is cooler than', count=25, cached=False):
        ret.append({
            'text': tweet.text,
            'author': tweet.author,
            'date': tweet.date,
            'hashtags': hashtags(tweet.text)
        })
    return str(ret)
Example 12: Generate_Tweets
def Generate_Tweets(searchterm, filename_label):
    twitter_obj = Twitter(license=None, throttle=0.5, language='en')
    # throttle: time between requests.
    # The twitter_obj can now be searched with the following parameters.
    # Twitter returns up to 1500 results for a search term, with an hourly
    # limit of 150 queries; each call to search() is one query. So you can
    # issue e.g. 15 queries of 100 results each, or 150 queries of 10 each.
    # Parameters for Twitter:
    #   start: 1 to 1500/count
    #   count: results per page, 1-100
    #   sort: RELEVANCY; limit: 150/hour; throttle: 0.5
    f = open(filename_label, 'a')
    for tweet in twitter_obj.search(searchterm, cached=False, language='en', sort='RELEVANCY', count=100):
        unicode_tweet = plaintext(tweet.description)
        # Tweets are unicode and need to be converted to ASCII before storing in the file.
        ascii_tweet = unicode_tweet.encode('ascii', 'ignore')
        f.write(ascii_tweet + '\n')
    f.close()
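A hypothetical driver; searching on emoticons like this is a common trick for collecting rough positive and negative training corpora:
from pattern.web import Twitter, plaintext

Generate_Tweets(':)', 'tweets_pos.txt')  # tweets with a positive smiley
Generate_Tweets(':(', 'tweets_neg.txt')  # tweets with a negative smiley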
Example 13: get_tweets
def get_tweets(self, search, nb, include_RT, useKey, keys):
    if not useKey:
        keys = None
    twitter = Twitter(
        language=self.dico_lang[self.language],
        license=keys
    )
    tweets = list()
    if not include_RT:
        for tweet in twitter.search(search, start=1, count=nb*3):
            if not tweet.text.startswith('RT'):
                tweet_input = Input(tweet.text)
                annotations = {
                    'source': 'Twitter',
                    'author': tweet.author,
                    'date': tweet.date,
                    'url': tweet.url,
                    'search': search,
                }
                segment = tweet_input[0]
                segment.annotations.update(annotations)
                tweet_input[0] = segment
                tweets.append(tweet_input)
                if len(tweets) == nb:
                    break
    else:
        for tweet in twitter.search(search, start=1, count=nb):
            tweet_input = Input(tweet.text)
            annotations = {
                'source': 'Twitter',
                'author': tweet.author,
                'date': tweet.date,
                'url': tweet.url,
                'search': search,
            }
            segment = tweet_input[0]
            segment.annotations.update(annotations)
            tweet_input[0] = segment
            tweets.append(tweet_input)
    return tweets
Example 14: tweetSentiment
class tweetSentiment(object):

    def __init__(self, topic, tweetCount):
        self.topic = topic
        self.tweetCount = tweetCount
        self.t = Twitter(language='en')
        self.i = None

    def fArray(self):
        '''Full array including each tweet and its sentiment.'''
        fullArray = []
        for tweet in self.t.search(self.topic, start=self.i, count=self.tweetCount):
            fullArray.append([tweet.text, indicoio.sentiment(tweet.text)])
            self.i = tweet.id
        return fullArray

    def sArray(self):
        '''Calculate sentiment only.'''
        sentimentArray = []
        for tweet in self.t.search(self.topic, start=self.i, count=self.tweetCount):
            sentimentArray.append(indicoio.sentiment(tweet.text))
            self.i = tweet.id
        return sentimentArray

    def average(self, numArray):
        '''Average sentiment.'''
        return sum(numArray) / len(numArray)

    def trending(self):
        '''Sentiment of trending topics.'''
        trendArray = []
        for trend in Twitter().trends(cached=False):
            trendArray.append([trend, indicoio.sentiment(trend)])
        return trendArray
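A minimal usage sketch; indicoio is a hosted sentiment API, so the key line below is an assumption about your setup:
import indicoio
from pattern.web import Twitter

indicoio.config.api_key = 'YOUR_API_KEY'  # hypothetical key

ts = tweetSentiment('python', 10)
print(ts.average(ts.sArray()))  # mean score; indico returns values in [0.0, 1.0]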
Example 15: poli_twitter_analysis
def poli_twitter_analysis():
    """This function parses Twitter to determine the average sentiment towards political figures during an event."""
    # List of searches to run.
    candidates = ['trump', 'walker', 'fiorina', 'carson', 'cruz', 'rubio', 'huckabee',
                  'paul', 'kasich', 'christie', 'bush', 'clinton', 'sanders', "o'malley"]
    twtNum = 50  # number of tweets to search for each candidate
    t = Twitter()
    i = None
    twtstext = []
    twtsdate = []
    twtsauthor = []
    twtscandi = []
    twtssenti = []
    for item in candidates:
        for tweet in t.search(item, start=i, count=twtNum):
            twtscandi.append(item)
            twtstext.append(tweet.text)
            m = tweet.text
            twtsdate.append(tweet.date)
            twtsauthor.append(tweet.author)
            [senti, objec] = sentiment(m)
            twtssenti.append(senti)
    zipped1 = zip(twtscandi, twtssenti)
    zipped2 = zip(twtscandi, twtsdate, twtsauthor, twtstext, twtssenti)
    timestr = time.strftime("%Y%m%d%H%M%S")
    filename = timestr + '.txt'
    f = open(filename, 'w')
    f.write(' '.join(map(str, zipped1)))
    f.close()
    filename = 'tweets_' + timestr + '.txt'
    f = open(filename, 'w')
    f.write(' '.join(map(str, zipped2)))
    f.close()
    print 'Complete'