

Python Twython.searchTwitter Method Code Examples

This article collects typical usage examples of the twython.Twython.searchTwitter method in Python. If you are wondering what Twython.searchTwitter does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore other usage examples of the twython.Twython class.


The following shows 13 code examples of the Twython.searchTwitter method, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Python code examples.
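A note before the examples: searchTwitter belongs to the old, pre-3.0 Twython API, which still allowed unauthenticated queries against the original Twitter Search API. On current Twython releases (3.x) the equivalent call is search(), OAuth credentials are required, and rpp has been replaced by count. The following is a minimal sketch under those assumptions; the credential values are placeholders, not real keys:

from twython import Twython

# Placeholder credentials -- substitute your own application keys.
APP_KEY = 'YOUR_APP_KEY'
APP_SECRET = 'YOUR_APP_SECRET'
OAUTH_TOKEN = 'YOUR_OAUTH_TOKEN'
OAUTH_TOKEN_SECRET = 'YOUR_OAUTH_TOKEN_SECRET'

twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)

# Twython 3.x: search() wraps GET search/tweets; results live under 'statuses'.
results = twitter.search(q='twython', count=10, result_type='recent')
for tweet in results['statuses']:
    print(tweet['text'])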

Example 1: main

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
def main():
  # NOTE: `conti`, `data`, `sort_res`, `ContinueThread` and `graph_points` are
  # defined elsewhere in the original emotion.py; this is a truncated excerpt.
  twitter = Twython()
  count=0

  while(count<10 and conti=='yes'):
    dtnow=datetime.utcnow()
    tm_now=time.time()
    dt_str=str(dtnow.day)+' '+str(dtnow.month)+' '+str(dtnow.year)+' '+str(dtnow.hour)+':'+str(dtnow.minute)+':'+str(dtnow.second) +' +'+str(dtnow.microsecond)

    results = twitter.searchTwitter(q="i love you",since_id=dt_str,rpp='100',result_type='recent')

    res_tim=[]
    if results:
      print '\ngot result',len(results[u'results']),dt_str
      for each in results[u'results']:
        res_tim.append(each[u'created_at'])

    data1=sort_res(res_tim)
    print data1
    data.append(data1[0])

    if count==0:
      ContinueThread().start()
    print 'Do you want to continue(there is a time-out of 10sec):',

    time.sleep(10-int(time.time()-tm_now))
    count+=1

  graph_points(data1)
Developer: aravindbhaskar41, Project: codejam, Lines of code: 31, Source: emotion.py

Example 2: index

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
def index(request):
	data =  serializers.serialize("json", parkmarcadores.objects.all())
	data = simplejson.dumps(data)
	t = loader.get_template('templates/index.html')

	twitter = Twython()
	data_t = twitter.searchTwitter(q="#parkea")
	data_t = simplejson.dumps(data_t)
	c = RequestContext(request,{
	'title': 'Parkea',
	'json': data,
	'tweets': data_t,
	})
	return HttpResponse(t.render(c), content_type='text/html; charset=utf-8')
Developer: xogost, Project: parkea, Lines of code: 16, Source: views.py

Example 3: update_urls

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
def update_urls(max_id):
    twitter = Twython()
    results = twitter.searchTwitter(q=sys.argv[1], rpp="100", since_id = max_id)
    max_id = results["max_id"]
    for tweet in results["results"]:
        text = tweet["text"]
        links = re.findall(r'(https?://\S+)', text)
        for link in links:
            try:
                url =  link.encode('ascii') 
            except UnicodeEncodeError:
                continue
            if url in urls:
                count = urls[url]
                count += 1
                urls[url] = count
            else:
                urls[url] = 1
    return max_id
Developer: naoyamakino, Project: MonitorTwitterSearch, Lines of code: 21, Source: monitor.py

Example 4: Twython

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
# grabbing the last "batch id", if it exists so we
# can make log entries that make SOME sense



for tweet_keyword in harvest_list: # for each keyword, do some shit

        cur.execute("""delete from tweetbanktemp where tweet_keyword = '"""+str(tweet_keyword)+"""'""")
        conn.commit()
        # whack the temp table in case we didn't exit cleanly

        #twitter = Twython()
        twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)
        ACCESS_TOKEN = twitter.obtain_access_token()

        search_results = twitter.searchTwitter(q=tweet_keyword, rpp="100")
        # our search for the current keyword

        #pp = pprint.PrettyPrinter(indent=3)
        # uncomment for debugging and displaying pretty output

        for tweet in search_results["results"]:
                # some me the tweet, jerry!
                print "        Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at'])
                print "        ",tweet['text'].encode('utf-8'),"\n"

                try:
                        # lets try to to put each tweet in our temp table for now
                        cur.execute("""insert into TweetBankTemp (tweet_id, tweet_datetime, tweet_keyword, tweet, tweeter, lang)
                                        values ('"""+str(tweet['id_str'].encode('utf-8').replace("'","''").replace(';',''))+"""',
                                                cast(substring('"""+str(tweet['created_at'].encode('utf-8'))+"""',5,21) as datetime),
Developer: arendligtenberg, Project: TwitterScripts, Lines of code: 33, Source: tweetgrabber.py
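A side note on Example 4: it requests an application-only (OAuth 2) bearer token via obtain_access_token() but then keeps calling the API through the same client object, so the token is never actually used. In Twython 3.x the documented pattern is to re-create the client with the obtained token before making requests. A minimal sketch under that assumption, with placeholder keys and a hypothetical query:

from twython import Twython

APP_KEY = 'YOUR_APP_KEY'        # placeholder
APP_SECRET = 'YOUR_APP_SECRET'  # placeholder

# Exchange the application credentials for an OAuth 2 bearer token...
twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)
ACCESS_TOKEN = twitter.obtain_access_token()

# ...then instantiate a new client that authenticates with that token.
twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)
search_results = twitter.search(q='some keyword', count=100)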

Example 5: retornaUltimoTweet

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
ultimo = retornaUltimoTweet(c)

# Puxa tweets
twitter = Twython(  twitter_token = CONSUMER_KEY, \
                    twitter_secret = CONSUMER_SECRET, \
                    oauth_token = TOKEN_KEY, \
                    oauth_token_secret = TOKEN_SECRET)

all_results = []
all_inseridos = []
count = 0

for page in range(1, 100):
    results = twitter.searchTwitter(q='''"eu to" OR "me sentindo" OR "estou"''', \
                                    rpp='15', \
                                    page=str(page), \
                                    since_id=str(ultimo), \
                                    result_type='recent')['results']
    if not results:
        break
    all_results.append(results)
    rePrefixo = "^(?!RT).*(eu t(o|ô)|estou|me sentindo).*(?!n(a|ã)o).*"

    # Para cada um dos resultados da busca
    for result in results:
        sentimento_id = defineSentimento(dicio, result['text'], rePrefixo)
        date_time = processaData(result['created_at'])

        # Se identificou sentimento
        if sentimento_id != 0:
            # Se conseguiu inserir do db
Developer: arturhoo, Project: Como-nos-Sentimos, Lines of code: 33, Source: crawler.py

Example 6: Twython

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
from twython import Twython

""" Instantiate Twython with no Authentication """
twitter = Twython()
search_results = twitter.searchTwitter(q="WebsDotCom", rpp="50")

for tweet in search_results["results"]:
	print tweet["text"]
Developer: EugeneLiang, Project: twython, Lines of code: 10, Source: search_results.py

Example 7: str

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
    lastID.append(0)

startTime = str(datetime.datetime.utcnow())
updateVitals(db_status)

# NOTE: radius is tight enough that we are not worrying about intersections.
# if it expands, it's something to reconsider.
while True:
    for (i, a) in enumerate(range(begin,end+1)):
        airport = code2a[codes[a]]
        radius = '3km'
        geo_area = '%f,%f,%s' % (airport.lat, airport.lon, radius)

        # perform search over geographic area
        try:
            search_results = twitter.searchTwitter(q="", rpp="100", lang="en", geocode=geo_area, since_id=str(lastID[i]), result_type="recent")
        except: # catch all problems
            logging.debug('Error caught, continuing after %d seconds' % (waitTime*60))
            print i, a
            emailAlert()
            printException()
            time.sleep(waitTime)
            twitter = Twython()
            continue

        #print search_results
        try:
            e = search_results['error']
            logging.debug('Error caught: %s' % e)
            logging.debug('Waiting for %d minutes' % waitTime)
            time.sleep(waitTime)
Developer: mudblood007, Project: RocData, Lines of code: 33, Source: collect-tweets-near-airports.py

Example 8: len

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
    # just load like 50 tweets, "whatever man" - rintaro kuroiwa
    search_results = None
    numSuccessfulQueries, numTweets = 0, 0

    # periodically query over the keyword search
    while True:
        kstart, kfinish = kstart % len(keywords), kfinish % len(keywords)
        if kstart > kfinish:
            kfinish = len(keywords)
        keywords_slice = keywords[kstart:kfinish]
        query_string = " OR ".join(keywords_slice)
        query_time = None
        logging.debug('\tQuery over string "%s"' % query_string)

        try:
            search_results = twitter.searchTwitter(q=query_string, rpp="100", lang="en", result_type="recent")
            query_time = datetime.utcnow()
        except:
            logging.error("What. I couldn't Twython that.")
            sys.exit(-1)

        # print and save resultssssssssss
        if search_results.has_key("error"):
            e = search_results["error"]
            logging.error("Something bad happened compadre.")
            logging.error("\t%s" % search_results["error"])
            sys.exit(-1)
        elif not search_results.has_key("results"):
            logging.error("No results key was found.")
            logging.error("\tsearch results: %s" % search_results)
            sys.exit(-1)
Developer: eyebraus, Project: TwitterHealthMental, Lines of code: 33, Source: collect.py

Example 9: open

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
numTweetsPerQuery = 15

handleToInfo = {}
fin = open('venueID_twitterHandle.txt', 'r')
for line in fin:
    (venueID, twitterHandle, venueName, venueDesc, venueAddr, venueLat, venueLon, numEvents) = line.split('\t')
    handleToInfo[twitterHandle] = {'id': int(venueID), 'twitterHandle': twitterHandle, 'name': venueName, 'description': venueDesc, 'address': venueAddr, 'lat': float(venueLat), 'lon': float(venueLon), 'numEvents': int(numEvents)}

# We won't authenticate for this, but sometimes it's necessary
twitter = Twython()

venues = []
for twitterHandle in handleToInfo.keys():
    user_timeline = twitter.getUserTimeline(screen_name=twitterHandle, rpp=numTweetsPerQuery)
    search_results = twitter.searchTwitter(q=twitterHandle, rpp=numTweetsPerQuery, lang="en")
    venue = handleToInfo[twitterHandle]
    tweetsTimeline = []
    tweetsSearch = []
    for tweet in user_timeline:
        tweetsTimeline.append(tweet)
    for tweet in search_results["results"]:
        tweetsSearch.append(tweet)
    venue['tweetsTimeline'] = tweetsTimeline
    venue['tweetsSearch'] = tweetsSearch
    venues.append(venue)
    
print json.dumps(venues, sort_keys=True)


Developer: mudblood007, Project: RocData, Lines of code: 29, Source: getUserTimeline.py

Example 10: Connection

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
from twython import Twython
from pymongo import *
from datetime import *

conn = Connection('localhost')
db = conn.nosqltweets
twitter = Twython(
			twitter_token = '9qtL4OHj9aZpFCWO2TAeig',
			twitter_secret = 'PXggjnvMb89sRc0Jp70LvSjYO4zpBv3dKJ2X1bD2Q',
			oauth_token = '12545732-6KF0EzUOKrCUvM4Cyi4oLsagVD3OomvDyK8yP2HVs',
			oauth_token_secret = 'ufLU2L7F0cQlm0HwE6fQ55B1CV8NiGDab8UMlm1rnI',
	)

types = ['mongodb', 'cassandra', 'couchdb']

for nosql in types:
	search_results = twitter.searchTwitter(q='#%s'%(nosql), rpp="100")
	for tweet in search_results["results"]:
		collection = getattr(db, nosql)
		collection.ensure_index('id_str', unique=True)
		from_user = tweet['from_user'].encode('utf-8')
		text = tweet['text']
		created_at = datetime.strptime(tweet['created_at'], "%a, %d %b %Y %H:%M:%S +0000")
		id_str = tweet['id_str']
		post = { 'id_str': id_str, 'from_user': from_user, 'created_at': created_at } 
		collection.insert(post)
Developer: bcarpio, Project: nosqltweets, Lines of code: 28, Source: search_results.py

Example 11: timedelta

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
yesterday = date.today() - timedelta(1)
yesterday = yesterday.strftime("%Y-%m-%d")

# get the last tweet from yesterday, so we can get all the tweets from today only
# *** need to fix this ***
# search_results = twitter.searchTwitter(q="#phdchat", page="1", until=yesterday)
# since_id = search_results["results"][0]["id"]

# get tweets
print "Retrieving tweets..."
count = 0
next_page = 1
while next_page:
    # get search results
    # see https://dev.twitter.com/doc/get/search for query information
    search_results = twitter.searchTwitter(q="#phdchat", rpp="100", page=str(page_num))  # , since_id=str(since_id))

    # simple way of catching end of all tweets
    try:
        search_results["next_page"]
    except:
        next_page = 0

        # get search results keys
        # print search_results.keys() gives us:
        # [u'next_page', u'completed_in', u'max_id_str', u'since_id_str', u'refresh_url', u'results', u'since_id', u'results_per_page', u'query', u'max_id', u'page']
    try:
        search_results["error"]
        next_page = 0
    except:
        results = search_results["results"]
Developer: radaniba, Project: bioinformatics-scripts, Lines of code: 33, Source: phdchat_aggregator.py

Example 12: string

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
#!/usr/local/bin/python
#: Title 	: twitter
#: Date 	: 2012-10-13
#: Author 	: Garot Conklin
#: Version 	: 1.00
#: Description 	: Generalized commandline Twitter searcher
#: Options 	: search string(s)
#: BUG 		: NON-Production
#: Return codes : 0
#: 		: 0 = successful
#: 		: 1 = failure
#: 		:-1 = ambiguous; no execution
#:
import os
from twython import Twython

term = raw_input("Enter Twitter search terms: ")

""" Instantiate Twython with no Authentication """
twitter = Twython()
search_results1 = twitter.searchTwitter(q="%s" % term, rpp="100")
print "=" * 50 + " Yahoo! Twitter Search for: %s " % term + "=" * 50
print " "
for site in search_results1["results"]:
    print site["created_at"], site["text"], site["from_user"]
print " "
print "=" * 56 + " END " + "=" * 57
Developer: gmconklin, Project: Universal, Lines of code: 29, Source: twitter.py

Example 13: Twython

# Required import: from twython import Twython [as alias]
# Or: from twython.Twython import searchTwitter [as alias]
        except IndentationError:
            pass
        except:
            pass


# Main objects
twitter = Twython(twitter_token, twitter_secret, oauth_token, oauth_token_secret)
con = mdb.connect(host, user, password, db)

query_subjects = ["Super Tuesday", "Romney", "Santorum", "Ron Paul", "Gingrich", "Obama"]

for query in query_subjects:
    table_name = query if query != "Super Tuesday" else "SuperTuesday"
    for page_num in range(1,4):
        search_results = twitter.searchTwitter(q=query, rpp="100", result_type="current", page=str(page_num))
        for tweet in search_results["results"]:
            tweet_id = tweet['id_str']
            user = tweet['from_user'].encode('utf-8')
            user_id = tweet['from_user_id_str']
            created_at = (parser.parse(tweet['created_at']))
            text = tweet['text'].encode('utf-8')
            if '"' in text:
                text = text.replace('"', '\'')
            geo = tweet['geo']
            coordinates = geo['coordinates'] if geo != None else "null"
            scraped_at = datetime.now()
            source = tweet['source']
            print text
            #insertToDB(con, table_name, tweet_id, user, user_id, created_at, text, geo, coordinates, scraped_at, source);
Developer: AKGP, Project: Independent-Study, Lines of code: 32, Source: scrape_tweets.py


Note: the twython.Twython.searchTwitter method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce without permission.