当前位置: 首页>>代码示例>>Python>>正文


Python Session.headers['User-Agent']方法代码示例

本文整理汇总了Python中requests.Session.headers['User-Agent']方法的典型用法代码示例。如果您正苦于以下问题:Python Session.headers['User-Agent']方法的具体用法?Python Session.headers['User-Agent']怎么用?Python Session.headers['User-Agent']使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在requests.Session的用法示例。


在下文中一共展示了Session.headers['User-Agent']方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: scrape_site

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
def scrape_site(url, cookie_file="", ses=False, is_rss=False):
    """Open *url* in a RoboBrowser backed by a configured requests Session.

    Args:
        url: Address to open.
        cookie_file: Optional path of an LWP cookie jar to restore.
        ses: When True, also return the underlying Session.
        is_rss: When True, parse the response as XML instead of HTML.

    Returns:
        The RoboBrowser instance, or ``(browser, session)`` when *ses*
        is True.
    """
    from http.cookiejar import LWPCookieJar
    from robobrowser import RoboBrowser
    from requests import Session
    s = Session()
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load(ignore_discard=True)
        except OSError:
            # Cookies don't exist yet (or the jar is unreadable/corrupt --
            # http.cookiejar.LoadError subclasses OSError); start fresh.
            pass
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    # RSS feeds need the XML parser; everything else is parsed as HTML5.
    parser = 'xml' if is_rss else 'html5lib'
    browser = RoboBrowser(session=s,
                          parser=parser)
    browser.open(url)
    if ses:
        return browser, s
    return browser
开发者ID:Kuppey,项目名称:Shiinabot,代码行数:28,代码来源:utils.py

示例2: _create_session

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
    def _create_session(self):
        """Build the authenticated requests Session and store it on self.

        The User-Agent header is always set.  An ``api_key`` query
        parameter is attached for every auth method except OAuth2
        resource-owner, and a JSON Accept header is added when this
        connection was configured for JSON responses.
        """
        auth_method = self.auth_params['method']
        new_session = Session()
        new_session.headers['User-Agent'] = self.user_agent
        # Resource-owner OAuth2 carries credentials in the token rather
        # than as an api_key query parameter.
        if auth_method != 'oauth2-resourceowner':
            new_session.params = {'api_key': self.auth_params['api_key']}
        if self.json:
            new_session.headers['Accept'] = ACCEPT_HEADERS['json']
        # NOTE(review): stored via Connection.__setattr__ directly --
        # presumably to bypass an overridden __setattr__; confirm.
        Connection.__setattr__(self, 'session', new_session)
开发者ID:MediaMath,项目名称:t1-python,代码行数:12,代码来源:connection.py

示例3: scrape_site

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
def scrape_site(url, cookie_file=""):
    """Open *url* with a RoboBrowser, logging in first when *cookie_file*
    is given but holds no usable cookies.

    Stores the session in the module-global ``s``.  Returns the
    RoboBrowser on success, or False when the page cannot be opened.
    """
    global s
    s = Session()
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load()
            if not s.cookies._cookies:
                # Jar loaded but holds nothing -- treat as expired and
                # force a fresh login below.
                raise ValueError("empty cookie jar")
        except Exception:
            # Missing, unreadable or empty jar: discard it and log in
            # again from scratch.
            if os.path.exists(cookie_file):
                os.remove(cookie_file)
            login_browser = RoboBrowser(session=s,
                                        parser='html5lib',
                                        timeout=10)
            if "sankakucomplex.com" in url:
                _site_login(login_browser,
                            "https://chan.sankakucomplex.com/user/login/",
                            "user[name]", "user[password]",
                            website_logins['sankakucomplex_username'],
                            website_logins['sankakucomplex_password'])
            elif "gelbooru.com" in url:
                _site_login(login_browser,
                            "http://gelbooru.com/index.php?page=account&s=login&code=00",
                            "user", "pass",
                            website_logins['gelbooru_username'],
                            website_logins['gelbooru_password'])
    browser = RoboBrowser(session=s,
                          parser='html5lib',
                          timeout=10)
    try:
        browser.open(url)
        return browser
    except Exception:
        # requests can raise several network errors here (timeouts,
        # connection failures); treat them all as a failed scrape.
        # Fixed: original called the undefined name `printf`.
        print("[WARNING] TIMEOUT WITH WEBSITE: {0}".format(url))
        return False


def _site_login(browser, url_login, user_field, pass_field, username, password):
    """Fill and submit the first form on *url_login* with the given
    credentials, then persist the resulting session cookies."""
    browser.open(url_login)
    form = browser.get_form(0)
    form[user_field].value = username
    form[pass_field].value = password
    browser.submit_form(form)
    s.cookies.save()
开发者ID:Okonorn,项目名称:AcePictureBot,代码行数:57,代码来源:utils.py

示例4: scrape_site

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
def scrape_site(url, cookie_file=""):
    """Open *url* in a RoboBrowser, optionally restoring cookies from
    *cookie_file*.

    Stores the session in the module-global ``s``.  Returns the
    RoboBrowser on success, or False when the page cannot be opened
    in time.
    """
    global s
    s = Session()
    if cookie_file:
        s.cookies = LWPCookieJar(cookie_file)
        try:
            s.cookies.load(ignore_discard=True)
        except OSError:
            # Cookies don't exist yet (or the jar is unreadable --
            # LoadError subclasses OSError); a new jar is written later.
            pass
    s.headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; rv:39.0)'
    s.headers['Accept'] = 'text/html'
    s.headers['Connection'] = 'keep-alive'
    browser = RoboBrowser(session=s,
                          parser='html5lib',
                          timeout=15)
    try:
        browser.open(url)
        return browser
    except Exception:
        # Most likely a requests timeout or connection error.
        print("[WARNING] TIMEOUT WITH WEBSITE: {0}".format(url))
        return False
开发者ID:Dimpsy,项目名称:AcePictureBot,代码行数:24,代码来源:mal_list.py

示例5: Session

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

import datetime
from requests import Session

# Module-wide HTTP session shared by all NRK requests in this add-on.
session = Session()
session.headers['User-Agent'] = 'kodi.tv'
# NOTE(review): presumably identifies this client as an Android app
# version to the API; the meaning of '999' is not visible here -- confirm.
session.headers['app-version-android'] = '999'


class ImageMixin(object):
    """Mixin providing ``thumb`` and ``fanart`` image URLs derived from
    an ``image_id`` attribute supplied by the concrete class."""

    image_id = None
    _image_url = "http://m.nrk.no/m/img?kaleidoId=%s&width=%d"

    def _sized_image(self, width):
        # No image id -> no URL at all.
        if not self.image_id:
            return None
        return self._image_url % (self.image_id, width)

    @property
    def thumb(self):
        """Thumbnail-sized (500px wide) image URL, or None."""
        return self._sized_image(500)

    @property
    def fanart(self):
        """Fanart-sized (1920px wide) image URL, or None."""
        return self._sized_image(1920)
开发者ID:esnalabu,项目名称:xbmc-addon-nrk,代码行数:32,代码来源:nrktv.py

示例6: Copyright

# 需要导入模块: from requests import Session [as 别名]
# 或者: from requests.Session import headers['User-Agent'] [as 别名]
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Espen Hovlandsdal

from requests import Session

# Base endpoint for VG's podcast API.
# Fixed: dropped the stray trailing semicolon (un-Pythonic paste artifact).
API_URL = 'http://api.vg.no/podcast'

# Module-wide session: identify this add-on and ask for JSON responses.
session = Session()
session.headers['User-Agent'] = 'kodi-vg-podcasts'
session.headers['Accept'] = 'application/json'

class Base(object):
    """Lightweight record type: every keyword argument passed to the
    constructor becomes an instance attribute; the class-level names
    below default to None when not supplied."""

    id = None
    title = None
    subtitle = None
    thumb = None
    logo = None

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

class Show(Base):
    @staticmethod
    def from_response(r):
        return Show(
            id=r['slug'],
            title=r['name'],
            subtitle=r['subtitle'],
            logo=r['logo'],
            thumb=r['logoThumb']
开发者ID:rexxars,项目名称:kodi-vg-podcasts,代码行数:33,代码来源:podcastapi.py


注:本文中的requests.Session.headers['User-Agent']方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。