本文整理汇总了Python中requests.Session.cookies方法的典型用法代码示例。如果您正苦于以下问题:Python Session.cookies方法的具体用法?Python Session.cookies怎么用?Python Session.cookies使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类requests.Session
的用法示例。
在下文中一共展示了Session.cookies方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: session
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def session():
    """Return the process-wide requests Session, building it on first use.

    On the first call the Session is created, its cookie jar is restored
    via ``cookies.load()``, and ``_save_cookies`` is registered to persist
    cookies at interpreter exit. Later calls return the cached instance.
    """
    global _session
    if _session:
        return _session
    _session = Session()
    _session.cookies = cookies.load()
    atexit.register(_save_cookies)
    return _session
示例2: scrape_site
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def scrape_site(url, cookie_file="", ses=False, is_rss=False):
    """Open *url* with a RoboBrowser backed by a cookie-persisting session.

    :param url: page to open.
    :param cookie_file: optional LWP cookie-jar path; when given, previously
        saved cookies are loaded into the session.
    :param ses: when True, return ``(browser, session)`` instead of just
        the browser.
    :param is_rss: parse the response as XML (RSS feed) instead of HTML.
    :return: the opened RoboBrowser, or ``(browser, session)`` if *ses*.
    """
    from http.cookiejar import LWPCookieJar, LoadError
    from robobrowser import RoboBrowser
    from requests import Session

    s = Session()
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load(ignore_discard=True)
        except (OSError, LoadError):
            # Cookies don't exist yet (or the jar file is unreadable or
            # malformed); continue with an empty jar.
            pass
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    parser = 'xml' if is_rss else 'html5lib'
    browser = RoboBrowser(session=s, parser=parser)
    browser.open(url)
    if ses:
        return browser, s
    return browser
示例3: getSession
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def getSession(self):
    """Build a requests Session with a Firefox UA and this object's cookie jar.

    Saved cookies are loaded from ``self.cookieFile`` when that file
    already exists on disk.
    """
    user_agent = r'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0'
    sess = Session()
    sess.headers.update({'User-Agent': user_agent})
    sess.cookies = LWPCookieJar(self.cookieFile)
    if os.path.exists(self.cookieFile):
        sess.cookies.load()
    return sess
示例4: load_cookie_to
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def load_cookie_to(session: requests.Session, cookie_path: Optional[str] = None):
    """Attach an LWP cookie jar at *cookie_path* to *session* and load it.

    Falls back to ``default_cookie_path`` when no path is given. The
    session's cookie jar is always replaced with a jar bound to the path;
    saved cookies are read only when the file already exists.

    :return: True when cookies were loaded from disk, False otherwise.
    """
    path = cookie_path if cookie_path else default_cookie_path
    session.cookies = LWPCookieJar(path)
    if not os.path.exists(path):
        return False
    session.cookies.load()
    logging.info(
        "Loaded session from {}".format(os.path.abspath(path)))
    return True
示例5: scrape_site
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def _submit_login(browser, url_login, form_user, form_password,
                  username, password, form_num=0):
    """Open *url_login*, fill in the credential form, submit, and save cookies."""
    browser.open(url_login)
    form = browser.get_form(form_num)
    form[form_user].value = username
    form[form_password].value = password
    browser.submit_form(form)
    s.cookies.save()


def scrape_site(url, cookie_file=""):
    """Open *url* with RoboBrowser, logging in to known booru sites first.

    A module-global Session ``s`` is (re)created so other code can reuse it.
    For sankakucomplex/gelbooru URLs the site's login form is submitted with
    credentials from ``website_logins`` and the cookies saved to *cookie_file*.

    :param url: page to open.
    :param cookie_file: optional LWP cookie-jar path for persistent logins.
    :return: the opened RoboBrowser, or False when opening the site failed.
    """
    global s
    s = Session()
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load()
            if not s.cookies._cookies:
                # Jar loaded but holds no cookies: treat it as expired.
                raise ValueError("cookie jar is empty")
        except Exception:
            # Missing, unreadable, or expired cookie file -- delete it so
            # the login below recreates it from scratch.
            if os.path.exists(cookie_file):
                os.remove(cookie_file)
    browser = RoboBrowser(session=s,
                          parser='html5lib',
                          timeout=10)
    if "sankakucomplex.com" in url:
        _submit_login(browser,
                      "https://chan.sankakucomplex.com/user/login/",
                      "user[name]", "user[password]",
                      website_logins['sankakucomplex_username'],
                      website_logins['sankakucomplex_password'])
    elif "gelbooru.com" in url:
        _submit_login(browser,
                      "http://gelbooru.com/index.php?page=account&s=login&code=00",
                      "user", "pass",
                      website_logins['gelbooru_username'],
                      website_logins['gelbooru_password'])
    browser = RoboBrowser(session=s,
                          parser='html5lib',
                          timeout=10)
    try:
        browser.open(url)
        return browser
    except Exception:
        # TODO: find what exceptions happen here
        # Original called the nonexistent printf() -- that was a NameError.
        print("[WARNING] TIMEOUT WITH WEBSITE: {0}".format(url))
        return False
示例6: createSession
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def createSession(self, cookies):
    """Create the global session to be used by all requests.

    The session is given a large HTTPS connection pool, this object's
    default headers, and the supplied cookie jar.

    :param cookies: cookie jar (or dict) to attach to the session.
    :return: the configured requests Session.
    """
    sess = Session()
    pooled_adapter = adapters.HTTPAdapter(pool_connections=1000,
                                          pool_maxsize=5000)
    sess.mount('https://', pooled_adapter)
    sess.headers = self.headers
    sess.cookies = cookies
    return sess
示例7: scrape_site
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def scrape_site(url, cookie_file=""):
    """Open *url* with a RoboBrowser backed by a cookie-persisting session.

    A module-global Session ``s`` is (re)created so other code can reuse it.

    :param url: page to open.
    :param cookie_file: optional LWP cookie-jar path; previously saved
        cookies are loaded into the session when the file can be read.
    :return: the opened RoboBrowser, or False when opening the site failed.
    """
    global s
    s = Session()
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load(ignore_discard=True)
        except Exception:
            # Cookies don't exist yet (or the jar is unreadable); proceed
            # with an empty jar. Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    browser = RoboBrowser(session=s,
                          parser='html5lib',
                          timeout=15)
    try:
        browser.open(url)
        return browser
    except Exception:
        # TODO: find out which exceptions browser.open actually raises here.
        print("[WARNING] TIMEOUT WITH WEBSITE: {0}".format(url))
        return False
示例8: session
# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import cookies [as 别名]
def session():
    """Return the lazily created module-wide requests Session.

    The cookie jar is restored from disk via ``cookies.load()`` the first
    time the Session is built; afterwards the cached instance is returned.
    """
    global _session
    if not _session:
        fresh = Session()
        fresh.cookies = cookies.load()
        _session = fresh
    return _session