This article collects typical usage examples of the Python method twython.Twython.search. If you are wondering how exactly Twython.search is used, what it is for, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, twython.Twython.
The following presents 15 code examples of the Twython.search method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
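Before turning to the examples, here is a minimal sketch of the calling pattern they all share, assuming Twitter API v1.1 application credentials; the key names and the query string are placeholders rather than values taken from any example below.

from twython import Twython

# Placeholder credentials -- substitute your own application keys and tokens.
APP_KEY = 'YOUR_APP_KEY'
APP_SECRET = 'YOUR_APP_SECRET'
OAUTH_TOKEN = 'YOUR_OAUTH_TOKEN'
OAUTH_TOKEN_SECRET = 'YOUR_OAUTH_TOKEN_SECRET'

twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# search() wraps the GET search/tweets endpoint; matching tweets come back
# as a list of status dictionaries under the 'statuses' key.
results = twitter.search(q='#python', count=10, result_type='recent')
for status in results['statuses']:
    print(status['id'], status['text'])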
Example 1: performTwitterSearch
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def performTwitterSearch(consumer_key, consumer_secret, access_token_key, access_token_secret, languages):
    twitter = Twython(consumer_key, consumer_secret, access_token_key, access_token_secret)
    register(APPLICATION_ID, REST_API_KEY)
    gs = goslate.Goslate()
    for language in languages:
        maxId = 999999999999999999999999
        for page in range(1, 11):
            print('page = ' + str(page))
            print('maxId = ' + str(maxId))
            if page == 1:
                query = twitter.search(q=language, src='typd', count=100)
            else:
                query = twitter.search(q=language, src='typd', count=100, max_id=maxId)
            for result in query['statuses']:
                text = result['text']
                id = result['id']
                if id != maxId:
                    newTweet = Tweet(Text=text, Language=gs.detect(text), tweetId=str(id))
                    newTweet.save()
                if id < maxId:
                    maxId = id
                    print('changed maxId to ' + str(maxId))
Example 2: count
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def count():
    # Connection established with Twitter API v1.1
    twitter = Twython(Consumer_Key, Consumer_Secret, Access_Token, Access_Token_Secret)
    # Twitter is queried
    response = twitter.search(q=keyword, count=tweetsXiteration, since=dateFrom, until=dateTo, result_type='mixed')
    # Results (partial)
    countTweets = len(response['statuses'])
    # If all the tweets have been fetched, then we are done
    if not ('next_results' in response['search_metadata']):
        done = True
    # If not all the tweets have been fetched, then...
    while (done == False):
        # Parsing information for maxID
        parse1 = response['search_metadata']['next_results'].split("&")
        parse2 = parse1[0].split("?max_id=")
        parse3 = parse2[1]
        maxID = parse3
        # Twitter is queried (again, this time with the addition of 'max_id')
        response = twitter.search(q=keyword, count=tweetsXiteration, since=dateFrom, until=dateTo, max_id=maxID, include_entities=1, result_type='mixed')
        # Updating the total amount of tweets fetched
        countTweets = countTweets + len(response['statuses'])
        # If all the tweets have been fetched, then we are done
        if not ('next_results' in response['search_metadata']):
            done = True
    return countTweets
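Example 2 extracts the next max_id by splitting the next_results string by hand. A more robust variant (a sketch, not part of the original function) lets urllib.parse do the work, since next_results is simply a query string such as '?max_id=...&q=...&include_entities=1':

from urllib.parse import parse_qs, urlparse

def extract_max_id(response):
    # Returns the max_id for the next page, or None when no further page exists.
    next_results = response['search_metadata'].get('next_results')
    if next_results is None:
        return None
    params = parse_qs(urlparse(next_results).query)
    return params['max_id'][0]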
Example 3: __init__
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
class SocialTrends:
    def __init__(self, authors, title, urls):
        self.authors = authors
        self.title = title
        self.urls = urls
        # hackathon hack!
        if not len(authors):
            self.authors = [""]
        if not len(urls):
            self.urls = ["NOT_APPLICABLE_90125738943705"]
        # Cache lookup keyed on (author, title, url), matching the key stored below.
        if (self.authors[0], self.title, self.urls[0]) in social_cache:
            self.twitter_results = social_cache[(self.authors[0], self.title, self.urls[0])]['twitter']
            return
        social_cache[(self.authors[0], self.title, self.urls[0])] = {}
        (APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) = KEYS.keys()
        self.tw = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
        self.twitter_results = ""
        self.reddit_results = ""

    def paper_tweets(self):
        if self.twitter_results:
            return self.twitter_results
        #allnames = author.split() # possible author names
        #lastname = allnames[len(allnames)-1]
        tweets = []
        trend_score = 0
        # possible urls
        for url in self.urls:
            # note: str.lstrip strips a set of characters, not a prefix (see the sketch after this example)
            url = url.lstrip("http://").lstrip("https://").lstrip("www.")
            results = self.tw.search(q=url, count=50)
            if results['statuses']:
                (new_tweets, rt_counts) = zip(*[(status['id'], status['retweet_count']) for status in results['statuses']])
                tweets += new_tweets
                trend_score += sum(rt_counts)
        # search title + author
        #TODO count retweets and use in ranking
        for author in self.authors:
            results = self.tw.search(q="\"" + self.title + "\" " + author, count=50)
            if results['statuses']:
                (new_tweets, rt_counts) = zip(*[(status['id'], status['retweet_count']) for status in results['statuses']])
                tweets += new_tweets
                trend_score += sum(rt_counts)
        #TODO see if there are any new urls to check in results?
        #TODO search author + title fragments
        #TODO better scoring system please
        trend_score += len(tweets)
        #TODO ID top tweets
        top_tweets = [(t, self.tw.show_status(id=t)) for t in tweets[:30]]
        self.twitter_results = (trend_score, top_tweets)
        social_cache[(self.authors[0], self.title, self.urls[0])]['twitter'] = self.twitter_results
        pickle.dump(social_cache, open(PICKLE_FILE, 'wb'))
        return self.twitter_results
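One caveat in paper_tweets above: str.lstrip removes any of the listed characters from the left rather than a literal prefix, so chaining lstrip("http://") and lstrip("www.") can also eat leading letters of the host name. A prefix-safe helper (a sketch, not part of the original class) could look like this:

import re

def strip_scheme(url):
    # Remove a leading http:// or https:// plus an optional www. prefix only.
    return re.sub(r'^https?://(www\.)?', '', url)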
Example 4: getTweets
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def getTweets(text, max_querys):
    twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)
    filtered = list()
    filtered_no_duplicates = list()
    since_id = 0
    # Make MAX_QUERYS calls to the Twitter search endpoint
    for i in range(1, max_querys):
        print(i)
        time.sleep(TIME_SLEEP)
        if since_id == 0:
            results = twitter.search(q=text, count='100', result_type='recent', geocode='-34.5965096,-58.3671446,5000km')
            # results = twitter.search(q=text, count='100', result_type='recent')
        else:
            results = twitter.search(q=text, count='100', result_type='recent', max_id=since_id, geocode='-34.5965096,-58.3671446,3000km')
            # results = twitter.search(q=text, count='100', result_type='recent', max_id=since_id)
        for result in results['statuses']:
            # Track the lowest id manually because the API is not returning since_id
            if (result['id'] < since_id or since_id == 0):
                since_id = result['id']
            if result.get('geo'):
                if result.get('geo').get('type') == 'Point':
                    filtered.append(result)
    for twit in filtered:
        if twit not in filtered_no_duplicates:
            filtered_no_duplicates.append(twit)
    create_csv(filtered_no_duplicates)
    return filtered_no_duplicates
Example 5: search_twitter
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def search_twitter(search_term, geocode=None):
    twitter = Twython(ckey, csecret)
    if geocode:
        result_search = twitter.search(q=search_term, geocode=geocode)
    else:
        result_search = twitter.search(q=search_term)
    return result_search
Example 6: search_twitter
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def search_twitter(self, search_term, geocode=None):
    twitter = Twython(APP_KEY, APP_SECRET)
    if geocode:
        result_search = twitter.search(q=search_term, geocode=geocode)
    else:
        result_search = twitter.search(q=search_term)
    return result_search
Example 7: perform_import
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def perform_import(self, retrieval_param, course_code):
    # Setup Twitter API Keys
    app_key = os.environ.get("TWITTER_APP_KEY")
    app_secret = os.environ.get("TWITTER_APP_SECRET")
    oauth_token = os.environ.get("TWITTER_OAUTH_TOKEN")
    oauth_token_secret = os.environ.get("TWITTER_OAUTH_TOKEN_SECRET")
    twitter = Twython(app_key, app_secret, oauth_token, oauth_token_secret)
    count = 0
    next_max_id = None
    results = None
    while True:
        try:
            if count == 0:
                results = twitter.search(q=retrieval_param, count=100, result_type='mixed')
            else:
                results = twitter.search(q=retrieval_param, count=100, max_id=next_max_id, result_type='mixed')
            for tweet in results['statuses']:
                self.insert_tweet(tweet, course_code)
            if 'next_results' not in results['search_metadata']:
                break
            else:
                next_results_url_params = results['search_metadata']['next_results']
                next_max_id = next_results_url_params.split('max_id=')[1].split('&')[0]
                count += 1
        except KeyError:
            # When there are no more pages (['paging']['next']), break from the
            # loop and end the script.
            break
Example 8: get_tweets
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def get_tweets(hashtag, tweetCount):
    twitter = Twython(APP_KEY, APP_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
    tweets = []
    corpora = ''
    wordCnt = Counter()
    # Max amount of queries is 15
    MAX_ATTEMPTS = 15
    counter = 0
    for i in range(0, MAX_ATTEMPTS):
        if tweetCount <= len(tweets):
            break  # found all tweets
        if (i == 0):
            results = twitter.search(q=hashtag, count=tweetCount - len(tweets), lang='en')
        else:
            results = twitter.search(q=hashtag, count=tweetCount - len(tweets), lang='en', include_entities='true', max_id=next_max_id)
        for result in results['statuses']:
            tweet = str(result['text'].encode(sys.stdout.encoding, errors='ignore'))
            # Remove poster's address
            tweet = re.sub('@[A-Za-z_0-9:]+ ', ' ', str(result['text'].encode(sys.stdout.encoding, errors='ignore')))
            # Removal of special Twitter symbols
            tweet = re.sub('(RT|RT |RT:|RT :|RT :)', '', tweet)
            tweet = re.sub('b\'', '', tweet)
            tweet = re.sub('b"', '', tweet)
            tweet = re.sub('\w\\?\\[A-Za-z0-9]* ', ' ', tweet)
            tweet = re.sub('&amp;', ' ', tweet)
            tweet = re.sub('&gt;', '>', tweet)
            tweet = re.sub('\\n', ' ', tweet)
            tweet = re.sub('\n', ' ', tweet)
            tweet = tweet.replace("\n", ' ')  # Does the work for some reason
            tweet = tweet.replace("\\n", ' ')  # Does the work for some reason
            tweet = re.sub('\\*\\n/g', ' ', tweet)
            tweet = re.sub('http[^\s]*', '', tweet)  # matches all words that start with http
            tweet = re.sub('\s\s+', ' ', tweet)
            tweet = tweet.strip()  # Removes all leading and trailing spaces
            # Remove ' and " at the end of a tweet
            if (tweet[-1] == '\'' or tweet[-1] == '"'):
                tweet = tweet[:len(tweet) - 1]
            tweets.append(tweet.strip())
            corpora += tweet
        try:
            next_results_url_params = results['search_metadata']['next_results']
            next_max_id = next_results_url_params.split('max_id=')[1].split('&')[0]
        except:
            break
    return tweets, wordCnt, corpora
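The long chain of re.sub calls in Example 8 can also be written as a table of (pattern, replacement) pairs applied in order, which makes the clean-up rules easier to audit. This is a condensed sketch of the same idea, not a drop-in replacement for the original function:

import re

CLEANUP_RULES = [
    (r'@[A-Za-z_0-9:]+\s', ' '),  # poster handles / mentions
    (r'\bRT\b:?\s*', ''),         # retweet markers
    (r'http\S+', ''),             # URLs
    (r'&\w+;', ' '),              # HTML entities such as &amp;
    (r'\s+', ' '),                # collapse runs of whitespace
]

def clean_tweet(text):
    for pattern, replacement in CLEANUP_RULES:
        text = re.sub(pattern, replacement, text)
    return text.strip()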
Example 9: main
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def main():
    twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)
    latest_id = twitter.search(q=hashtag, result_type='recent')['statuses'][1]['id']
    while 1:
        logger.debug("Looking for new tweets")
        try:
            result = twitter.search(q=hashtag, result_type='recent', since_id=latest_id)
            statuses = result['statuses']
            if len(statuses) > 0:
                contains_python = False
                contains_java = False
                contains_net = False
                tweet = statuses[0]
                latest_id = tweet['id']
                logger.info("New tweet!")
                hashtags = tweet['entities']['hashtags']
                for hashtag_ in hashtags:
                    if hashtag_['text'].lower() == 'python':
                        contains_python = True
                    if hashtag_['text'].lower() == 'java':
                        contains_java = True
                    if hashtag_['text'].lower() == 'dotnet':
                        contains_net = True
                if contains_python and contains_java and contains_net:
                    antialisedPoint(ledStrip, [255, 0, 0], 0.5, 0.3, 0.01)
                    antialisedPoint(ledStrip, [0, 255, 0], 0.5, 0.3, 0.01)
                    antialisedPoint(ledStrip, [0, 0, 255], 0.5, 0.3, 0.01)
                elif contains_python:
                    logger.info("Python!")
                    antialisedPoint(ledStrip, [0, 255, 0], 0.5, 0.3, 0.1)
                elif contains_java:
                    logger.info("JAVA!")
                    antialisedPoint(ledStrip, [0, 0, 255], 0.5, 0.3, 0.1)
                elif contains_net:
                    logger.info(".NOT!")
                    antialisedPoint(ledStrip, [255, 0, 0], 0.5, 0.3, 0.1)
            sleep(6)
        except (TwythonError, TwythonRateLimitError) as e:
            logger.warning('Error querying twitter')
            logger.warning(e.msg)
            if hasattr(e, 'retry_after') and e.retry_after is not None:
                logger.warning('Got told to wait %s seconds before retrying', e.retry_after)
                sleep(e.retry_after)
            else:
                logger.warning('Trying to wait five minutes and see if that helps')
                sleep(300)
Example 10: main
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def main():
    acctDict = get_api_keys()
    twitter = Twython(acctDict['ConsumerKey'], acctDict['ConsumerSecret'],
                      acctDict['AccessToken'], acctDict['AccessTokenSecret'])
    wantSearch = twitter.search(q='#SwappWant', count=10, geocode=GEOCODE)
    haveSearch = twitter.search(q='#SwappHave', count=10, geocode=GEOCODE)
    search = twitter.search(q='', count=100)
    tweets = search['statuses']
    for tweet in tweets:
        print(tweet['text'].encode('utf-8'), '\n')
Example 11: twitter_count
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def twitter_count():
    # Set parameters
    keyword = 'kickstarter'  # The desired keyword(s)
    tweetsXiteration = 100  # Where 100 is the max
    d = datetime.timedelta(days=1)
    today = datetime.date.today()
    yesterday = today - d
    dateFrom = yesterday.strftime("%Y-%m-%d")  # Inclusive (YYYY-MM-DD)
    dateTo = today.strftime("%Y-%m-%d")  # Exclusive (YYYY-MM-DD)
    done = False  # Must be false
    # Setting the OAuth
    Consumer_Key = '0Io4CpjhioOrhvsUV4JFNDhER'
    Consumer_Secret = '0CIK7WinLIys6RqrthW4tZ1naKqT5t0xkEkUD7mgjhkjETxKt2'
    Access_Token = '297465676-tYLgZxyI5tUxkBOZkzu2LR3jNKZXQhLopPpCwmJi'
    Access_Token_Secret = 'lDmvxgN0mpxgXEVjQrqROGPwvv4xvctiyGCNLnhmPOEsY'
    # Connection established with Twitter API v1.1
    twitter = Twython(Consumer_Key, Consumer_Secret, Access_Token, Access_Token_Secret)
    # Twitter is queried
    response = twitter.search(q=keyword, count=tweetsXiteration, since=dateFrom, until=dateTo, result_type='mixed')
    # Results (partial)
    countTweets = len(response['statuses'])
    # If all the tweets have been fetched, then we are done
    if not ('next_results' in response['search_metadata']):
        done = True
    # If not all the tweets have been fetched, then...
    while (done == False):
        # Parsing information for maxID
        parse1 = response['search_metadata']['next_results'].split("&")
        parse2 = parse1[0].split("?max_id=")
        parse3 = parse2[1]
        maxID = parse3
        # Twitter is queried (again, this time with the addition of 'max_id')
        response = twitter.search(q=keyword, count=tweetsXiteration, since=dateFrom, until=dateTo, max_id=maxID, include_entities=1, result_type='mixed')
        # Updating the total amount of tweets fetched
        countTweets = countTweets + len(response['statuses'])
        # If all the tweets have been fetched, then we are done
        if not ('next_results' in response['search_metadata']):
            done = True
    return countTweets
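Example 11 embeds its OAuth credentials directly in the source. Example 7 above reads the same four values from environment variables instead, which is generally preferable; a minimal sketch of that approach (the variable names follow Example 7 and are otherwise arbitrary):

import os
from twython import Twython

twitter = Twython(
    os.environ['TWITTER_APP_KEY'],
    os.environ['TWITTER_APP_SECRET'],
    os.environ['TWITTER_OAUTH_TOKEN'],
    os.environ['TWITTER_OAUTH_TOKEN_SECRET'],
)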
Example 12: __init__
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
class twython_api:
    def __init__(self):
        self.TWITTER = Twython(settings.APP_KEY, access_token=settings.ACCESS_TOKEN)

    def search(self, word, type=None, max=None):
        # Only search
        if type == None and max == None:
            data = self.TWITTER.search(q=word)
            return data
        # Search with result type
        elif max == None:
            # result_type:
            # Specifies what type of search results you would prefer to receive.
            # The current default is 'mixed'.
            # Valid values include:
            #   mixed: include both popular and real-time results in the response.
            #   recent: return only the most recent results in the response.
            #   popular: return only the most popular results in the response.
            data = self.TWITTER.search(q=word, result_type=type)
            return data
        # Search with result type and count
        else:
            # count:
            # The number of tweets to return per page, up to a maximum of 100.
            # Defaults to 15.
            # This was formerly the 'rpp' parameter in the old Search API.
            data = self.TWITTER.search(q=word, result_type=type, count=max)
            return data

    # Returns a generator
    def search_gen(self, word):
        # This function offers a generator for search results
        data = self.TWITTER.search_gen(word)
        return data

    # Use of search_gen:
    # twitter = twython_api()
    # data = twitter.search_gen('#bitcoin')
    # for result in data:
    #     print(result)

    # Don't use with search_gen; only use with search.
    def get_tweet_list_by_html(self, data):
        tweet_list = []
        for result in data['statuses']:
            result['text'] = Twython.html_for_tweet(result, use_display_url=True, use_expanded_url=True)
            tweet_list.append(result['text'])
        return tweet_list
Example 13: Collect_Tweets_by_Hashtag
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
class Collect_Tweets_by_Hashtag(object):
    def __init__(self, searchterm):
        self.searchterm = searchterm
        self.filename = '.'.join(['TwitterOutput', searchterm, 'csv'])
        self.max_id = None

    def _call_Twitter_api(self):
        twitter_api_info = json.load(open('/Users/zephryin/.api/Twitter_API.json'))
        CONSUMER_KEY = twitter_api_info['API Key']
        CONSUMER_SECRET = twitter_api_info['API Secret']
        self.twitterAPI = Twython(CONSUMER_KEY, CONSUMER_SECRET)

    def get_twitter(self, counts=100):
        self._call_Twitter_api()
        for r in range(counts):
            print(r)
            if self.max_id is None:
                self.tweets = self.twitterAPI.search(q=self.searchterm,
                                                     count=100, lang='en')
            else:
                self.tweets = self.twitterAPI.search(q=self.searchterm, count=100, max_id=self.max_id, include_entities=self.entities)
            self._write_file(self.tweets)
            if 'next_results' in self.tweets['search_metadata']:
                self._get_twitter_search_options(self.tweets)
            else:
                break

    def _write_file(self, tweets):
        print("writing to file")
        with open(self.filename, 'a') as fh:
            for i in range(len(tweets['statuses'])):
                if str(tweets['statuses'][i]['metadata']['iso_language_code']) == 'en':
                    screenName = tweets['statuses'][i]['user']['screen_name']
                    tweet_text = tweets['statuses'][i]['text']
                    statusCount = tweets['statuses'][i]['user']['statuses_count']
                    line = [screenName, str(statusCount), tweet_text]
                    fh.write(','.join([x.encode('UTF8') for x in line]))

    def _get_twitter_search_options(self, tweets):
        meta = str(tweets['search_metadata']['next_results'])
        m = re.search('max_id=(\d+).*include_entities=(\d+)', meta)
        self.max_id = m.group(1)
        self.entities = m.group(2)
Example 14: TwitterHandler
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
class TwitterHandler(object):
    def __init__(self):
        self.consumer_key = CONSUMER_KEY
        self.consumer_secret = CONSUMER_SECRET
        self.access_token = OAUTH_TOKEN
        self.access_token_secret = OAUTH_SECRET
        self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        self.auth.set_access_token(self.access_token, self.access_token_secret)
        self.api = tweepy.API(self.auth)
        self.twython = Twython(CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_SECRET)

    def post(self, message):
        if len(message) <= 140:
            try:
                self.api.update_status(message)
            except:
                pass

    def get_data(self, keyword, length):
        ans = []
        try:
            result_set = self.twython.search(q=keyword, count=length)
            tweets = result_set['statuses']
            for tweet in tweets:
                body = tweet['text']
                user_object = tweet['user']
                new_object = Tweet(user_object, body)
                ans.append(new_object)
            return ans
        except:
            return ans
Example 15: parseTweets
# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import search [as alias]
def parseTweets(username, hashtag):
    try:
        from twython import Twython
        twitter = Twython()
        if (username is not None):
            tweets = twitter.getUserTimeline(screen_name=username)
            for t in tweets:
                it = Item(title=texto2Unicode(t["text"]),
                          desc=texto2Unicode(t["text"]),
                          author=texto2Unicode(t["user"]["screen_name"]),
                          category=texto2Unicode("twitter"),
                          link="",
                          img=texto2Unicode(t["user"]["profile_image_url_https"]),
                          pub_date=texto2Unicode(t["created_at"]))
                checkAndSaveEntry(it)
        if (hashtag is not None):
            twhash = twitter.search(q=hashtag)
            for t in twhash["results"]:
                it = Item(title=texto2Unicode(t["text"]),
                          desc=texto2Unicode(t["text"]),
                          author=texto2Unicode(t["from_user"]),
                          category=texto2Unicode("twitter"),
                          link="",
                          img=texto2Unicode(t["profile_image_url_https"]),
                          pub_date=texto2Unicode(t["created_at"]))
                checkAndSaveEntry(it)
    except Exception as e:
        print("ExceptionTW: %s" % e)
    return 0