This article collects typical usage examples of the login function from the Python module twitter__login. If you have been wondering how the login function is used in practice, what it is good for, or what calling it looks like, the curated code examples below may help.
A total of 11 code examples of the login function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
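None of the examples below reproduce the helper module itself. For context, here is a minimal sketch of what a twitter__login.py along these lines typically contains, following the usual pattern for the twitter package: run the OAuth dance once, cache the token on disk, and return an authenticated twitter.Twitter client. The app name, empty key placeholders, and token path are illustrative assumptions, not taken from any example on this page.

import os
import twitter
from twitter.oauth import read_token_file, write_token_file
from twitter.oauth_dance import oauth_dance

def login():
    # Placeholders: fill in the credentials of your registered Twitter app
    APP_NAME = 'YourAppName'
    CONSUMER_KEY = ''
    CONSUMER_SECRET = ''
    TOKEN_FILE = 'out/twitter.oauth'
    try:
        # Reuse a previously cached access token if one exists
        (access_token, access_token_secret) = read_token_file(TOKEN_FILE)
    except IOError:
        # First run: perform the interactive OAuth dance and cache the token
        (access_token, access_token_secret) = oauth_dance(
            APP_NAME, CONSUMER_KEY, CONSUMER_SECRET)
        if not os.path.isdir('out'):
            os.mkdir('out')
        write_token_file(TOKEN_FILE, access_token, access_token_secret)
    return twitter.Twitter(domain='api.twitter.com', api_version='1.1',
                           auth=twitter.oauth.OAuth(access_token, access_token_secret,
                                                    CONSUMER_KEY, CONSUMER_SECRET))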
Example 1: __init__
def __init__(self):
    self.counter = 0
    self.fprefix = ""
    self.output = None
    self.twitter_rest = twitter__login.login()
    self.twitter_stream = twitter.TwitterStream(
        auth=twitter.oauth.OAuth(access_token, access_token_secret,
                                 consumer_key, consumer_secret))
    self.fname = None
    self.ufname = None
Example 2: __init__
def __init__(self):
    self.listOfFiles = None
    self.listOfUsers = []
    self.listOfUserList = []  # (id, title, des)
    self.twitter_rest = twitter__login.login()
    self.listOfListsMetaInfo = []
    self.aspect = None
    self.u = Utils.Utils()
Example 3: get_stream
def get_stream(self, TIMELINE_NAME, MAX_PAGES):
    USER = None
    KW = {  # For the Twitter API call
        'count': 200,
        'trim_user': 'true',
        'include_rts': 'true',
        'since_id': 1,
    }
    if TIMELINE_NAME == 'user':
        USER = sys.argv[3]
        KW['screen_name'] = USER
    if TIMELINE_NAME == 'home' and MAX_PAGES > 4:
        MAX_PAGES = 4
    if TIMELINE_NAME == 'user' and MAX_PAGES > 16:
        MAX_PAGES = 16
    t = login()
    client = MongoClient('localhost', 27017)
    db = client.test_database
    posts = db.tw_data  # Collection name
    posts.drop()
    api_call = getattr(t.statuses, TIMELINE_NAME + '_timeline')
    tweets = makeTwitterRequest(api_call, **KW)
    for tweet in tweets:
        if tweet['lang'] == 'en':
            post_id = posts.insert(tweet)
    page_num = 1
    while page_num < MAX_PAGES and len(tweets) > 0:
        # Necessary for traversing the timeline in Twitter's v1.1 API.
        # See https://dev.twitter.com/docs/working-with-timelines
        KW['max_id'] = getNextQueryMaxIdParam(tweets)
        api_call = getattr(t.statuses, TIMELINE_NAME + '_timeline')
        tweets = makeTwitterRequest(api_call, **KW)
        for tweet in tweets:
            if tweet['lang'] == 'en':
                post_id = posts.insert(tweet)
        page_num += 1
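getNextQueryMaxIdParam comes from an accompanying utility module and is not shown in the example. Based on the pagination pattern documented at the URL in the comment above, a plausible sketch (an assumption, not the module's actual code) is:

def getNextQueryMaxIdParam(tweets):
    # Page backwards through a timeline: max_id must be one less than the
    # smallest id already fetched, or the boundary tweet is returned twice
    return min([tweet['id'] for tweet in tweets]) - 1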
Example 4: load_followers
def load_followers():
    # For each user extracted by the getusers method, load followers from Twitter
    t = twitter__login.login()
    in_file = open('users.txt', 'rb')
    count = 1
    count2 = 1
    # Twitter only allows fetching 15 users' followers every 30 minutes,
    # so this method is run to completion with adjustments to the count variable
    for row in in_file:
        name = row.strip()
        if count >= 47:
            followers = t.followers.ids(screen_name=name)
            # For every user, obtain the list of followers and store it in a
            # follower text file (the user name is used for identification)
            out_file = name + ".txt"
            out = open(out_file, 'w')
            pickle.dump(followers['ids'], out)
            out.close()
            count2 += 1
        count += 1
        # Tried adding a sleep after every 15 users; that worked sometimes
        if count2 % 15 == 0:
            time.sleep(180)
            count2 = 1
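The ad-hoc count/sleep bookkeeping above can be made more explicit. A minimal sketch of window-based throttling, assuming the same t client and the rate limit described in the comment (the helper name and parameters are assumptions), might look like this:

import time

def fetch_followers_throttled(t, names, per_window=15, window_secs=30 * 60):
    # Hypothetical helper: pause once per batch of `per_window` requests so
    # the followers/ids calls stay inside the stated rate-limit window
    results = {}
    for i, name in enumerate(names):
        if i > 0 and i % per_window == 0:
            time.sleep(window_secs)
        results[name] = t.followers.ids(screen_name=name)['ids']
    return results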
Example 5: login
# -*- coding: utf-8 -*-
import sys
import json
import redis

from twitter__login import login

# A makeTwitterRequest call through to the /users/lookup
# resource, which accepts a comma-separated list of up
# to 100 screen names. Details are fairly uninteresting.
# See also http://dev.twitter.com/doc/get/users/lookup
from twitter__util import getUserInfo

if __name__ == "__main__":
    screen_names = sys.argv[1:]
    t = login()
    r = redis.Redis()
    print(json.dumps(
        getUserInfo(t, r, screen_names=screen_names),
        indent=4
    ))
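getUserInfo itself lives in twitter__util and is not reproduced on this page. A rough sketch of the Redis-backed lookup it performs, inferred only from the comment above (an assumption: batch names through /users/lookup, serve repeats from the cache, reusing this script's json and redis imports), might be:

def getUserInfo(t, r, screen_names):
    # Hypothetical sketch: cache profiles in Redis under their screen name,
    # fetching any misses from /users/lookup in batches of up to 100
    misses = [sn for sn in screen_names if r.get('sn$' + sn) is None]
    for i in range(0, len(misses), 100):
        batch = ','.join(misses[i:i + 100])
        for profile in t.users.lookup(screen_name=batch):
            r.set('sn$' + profile['screen_name'], json.dumps(profile))
    return [json.loads(r.get('sn$' + sn)) for sn in screen_names]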
Example 6: auth_to_twitter
def auth_to_twitter():
    return twitter__login.login()
Example 7: login
KW = {  # For the Twitter API call
    'count': 200,
    'trim_user': 'true',
    'include_rts': 'true',
    'since_id': 1,
}
if TIMELINE_NAME == 'user':
    USER = sys.argv[3]
    KW['screen_name'] = USER
if TIMELINE_NAME == 'home' and MAX_PAGES > 4:
    MAX_PAGES = 4
if TIMELINE_NAME == 'user' and MAX_PAGES > 16:
    MAX_PAGES = 16

t = login()

# Establish a connection to a CouchDB database
server = couchdb.Server('http://localhost:5984')
DB = 'tweets-%s-timeline' % (TIMELINE_NAME, )
if USER:
    DB = '%s-%s' % (DB, USER)
try:
    db = server.create(DB)
except couchdb.http.PreconditionFailed as e:
    # Already exists, so append to it, keeping in mind that duplicates could occur
    db = server[DB]
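One way to avoid the duplicates the comment warns about (a sketch under assumptions, not part of the original snippet) is to reuse each tweet's id as the CouchDB document _id, so a second insert of the same tweet raises a conflict that can simply be ignored:

for tweet in tweets:  # assumes tweets fetched as in Example 3
    tweet['_id'] = str(tweet['id'])
    try:
        db.save(tweet)
    except couchdb.http.ResourceConflict:
        pass  # already stored on a previous run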
Example 8: GetIds
def GetIds():
    # Obtain the id for every user; these are used to create nodes and edges
    t = twitter__login.login()
    file_1 = open('users.txt', 'rb')
    file_2 = open('user_id.txt', 'w')
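    # --- Hypothetical completion (not in the original snippet): assuming
    # users.txt holds one screen name per line, resolve each name to its
    # numeric id via /users/show and write the id to user_id.txt ---
    for row in file_1:
        name = row.strip()
        user = t.users.show(screen_name=name)
        file_2.write('%s\n' % user['id'])
    file_1.close()
    file_2.close()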
Example 9: login
def login(self):  # we add nothing here; just delegate
    return login()
Example 10: on_stall_warning
        return True  # Don't kill the stream

    def on_stall_warning(self, status):
        print("Got Stall Warning message", str(status))
        return True  # Don't kill the stream

try:
    # my config is hard coded
    fn = os.path.join(os.environ['HOME'], 'conf', 'twitter_mining.cfg')
    config = ConfigParser.RawConfigParser()
    config.read(fn)
    while True:
        try:
            # oauth dance
            auth = login(config)
            # Create a streaming API and set a timeout value of 1 minute
            streaming_api = tweepy.streaming.Stream(auth, CustomStreamListener(),
                                                    timeout=60, secure=True)
            Q = sys.argv[2:]
            print("Track parameters", str(Q))
            streaming_api.filter(follow=None, track=Q)
        except Exception as ex:
            err = "'%s' '%s' Error '%s' '%s'" % (dbname, str(datetime.now()),
                                                 str(ex), get_trace())
            print(err)
            open('errors.txt', 'a').write(err + '\n')
        finally:
            print("disconnecting...")
            streaming_api.disconnect()
            # time.sleep(60)
except KeyboardInterrupt:
    print("got keyboardinterrupt")
Example 11: getFile
def getFile(fname):
    try:
        f = open(fname)
        d = json.load(f)
        f.close()
        return d
    except Exception:  # e.g. file missing or not valid JSON
        return None

def saveFile(d):
    print("Saving!")
    f = open(d["fname"], 'w')
    json.dump(d, f)
    f.close()

t = twitter__login.login()
screen_name = 'AdventureSauce1'
response = t.users.show(screen_name=screen_name)
user_id = response['id']
dname = screen_name + '_net.json'
d = getFile(dname)
if d is None:
    d = dict()
    d["fname"] = dname
specialcases = []
crawlUser(user_id, d, t)
handleSpecialCases(t, d, specialcases)
saveFile(d)
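crawlUser and handleSpecialCases are defined elsewhere in this script and do not appear on this page. As a rough illustration only (the signature, the extra specialcases parameter, and the behavior are all assumptions), a crawler along these lines typically records each user's follower ids in d and defers accounts that error out:

def crawlUser(user_id, d, t, specialcases=None):
    # Hypothetical sketch: store the user's follower ids under their id;
    # protected or failing accounts are deferred to the special-case list
    try:
        response = t.followers.ids(user_id=user_id)
        d[str(user_id)] = response['ids']
    except twitter.api.TwitterHTTPError:
        if specialcases is not None:
            specialcases.append(user_id)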