当前位置: 首页>>代码示例>>Python>>正文


Python HttpProxyMiddleware.process_request方法代码示例

本文整理汇总了Python中scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware.process_request方法的典型用法代码示例。如果您正苦于以下问题:Python HttpProxyMiddleware.process_request方法的具体用法?Python HttpProxyMiddleware.process_request怎么用?Python HttpProxyMiddleware.process_request使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware的用法示例。


在下文中一共展示了HttpProxyMiddleware.process_request方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_proxy_auth_encoding

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
    def test_proxy_auth_encoding(self):
        """Non-ASCII proxy credentials must be encoded with the
        middleware's ``auth_encoding`` before being base64-encoded into
        the ``Proxy-Authorization`` header."""
        # utf-8 encoding, credentials taken from the environment
        os.environ['http_proxy'] = u'https://m\u00E1n:[email protected]:3128'
        middleware = HttpProxyMiddleware(auth_encoding='utf-8')
        request = Request('http://scrapytest.org')
        assert middleware.process_request(request, spider) is None
        self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
        self.assertEqual(request.headers.get('Proxy-Authorization'),
                         b'Basic bcOhbjpwYXNz')

        # utf-8 encoding, credentials taken from request.meta
        request = Request('http://scrapytest.org', meta={'proxy': u'https://\u00FCser:[email protected]:3128'})
        assert middleware.process_request(request, spider) is None
        self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
        self.assertEqual(request.headers.get('Proxy-Authorization'),
                         b'Basic w7xzZXI6cGFzcw==')

        # latin-1 encoding, credentials taken from the environment
        middleware = HttpProxyMiddleware(auth_encoding='latin-1')
        request = Request('http://scrapytest.org')
        assert middleware.process_request(request, spider) is None
        self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
        self.assertEqual(request.headers.get('Proxy-Authorization'),
                         b'Basic beFuOnBhc3M=')

        # latin-1 encoding, credentials taken from request.meta
        request = Request('http://scrapytest.org', meta={'proxy': u'https://\u00FCser:[email protected]:3128'})
        assert middleware.process_request(request, spider) is None
        self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
        self.assertEqual(request.headers.get('Proxy-Authorization'),
                         b'Basic /HNlcjpwYXNz')
开发者ID:Parlin-Galanodel,项目名称:scrapy,代码行数:29,代码来源:test_downloadermiddleware_httpproxy.py

示例2: test_proxy_auth_empty_passwd

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
 def test_proxy_auth_empty_passwd(self):
     """An empty password still produces a ``Proxy-Authorization``
     header: ``user:`` base64-encoded."""
     # credentials supplied through the environment
     os.environ['http_proxy'] = 'https://user:@proxy:3128'
     middleware = HttpProxyMiddleware()
     request = Request('http://scrapytest.org')
     assert middleware.process_request(request, spider) is None
     self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
     self.assertEqual(request.headers.get('Proxy-Authorization'), b'Basic dXNlcjo=')

     # credentials supplied through request.meta instead of the environment
     request = Request('http://scrapytest.org', meta={'proxy': 'https://username:@proxy:3128'})
     assert middleware.process_request(request, spider) is None
     self.assertEqual(request.meta, {'proxy': 'https://proxy:3128'})
     self.assertEqual(request.headers.get('Proxy-Authorization'), b'Basic dXNlcm5hbWU6')
开发者ID:Parlin-Galanodel,项目名称:scrapy,代码行数:14,代码来源:test_downloadermiddleware_httpproxy.py

示例3: test_proxy_auth

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
 def test_proxy_auth(self):
     """process_request() must strip the credentials from the proxy URL
     and move them into a basic ``Proxy-Authorization`` header."""
     os.environ['http_proxy'] = 'https://user:[email protected]:3128'
     mw = HttpProxyMiddleware()
     req = Request('http://scrapytest.org')
     assert mw.process_request(req, spider) is None
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # use assertEqual.
     self.assertEqual(req.meta, {'proxy': 'https://proxy:3128'})
     self.assertEqual(req.headers.get('Proxy-Authorization'), b'Basic dXNlcjpwYXNz')
开发者ID:01-,项目名称:scrapy,代码行数:9,代码来源:test_downloadermiddleware_httpproxy.py

示例4: test_no_proxy

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
    def test_no_proxy(self):
        """Requests whose host matches ``no_proxy`` must not be proxied.

        Fixes: the original bound the proxy URL to an unused local
        ``http_proxy``; the dead binding has been dropped and the three
        near-identical checks folded into one data-driven loop.
        """
        os.environ['http_proxy'] = 'https://proxy.for.http:3128'
        mw = HttpProxyMiddleware()

        # (no_proxy value, whether a proxy is expected in request.meta)
        for no_proxy, expected in [
                ('*', False),                       # wildcard disables all proxying
                ('other.com', True),                # host not listed: proxy applies
                ('other.com,noproxy.com', False)]:  # host listed: no proxy
            os.environ['no_proxy'] = no_proxy
            req = Request('http://noproxy.com')
            assert mw.process_request(req, spider) is None
            self.assertEqual('proxy' in req.meta, expected)
开发者ID:01-,项目名称:scrapy,代码行数:20,代码来源:test_downloadermiddleware_httpproxy.py

示例5: test_no_enviroment_proxies

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
    def test_no_enviroment_proxies(self):
        """Without any ``*_proxy`` variables the middleware must leave
        requests untouched, whatever their URL scheme."""
        # Replace os.environ wholesale so no real proxy settings leak in;
        # the dummy key just proves unrelated variables are ignored.
        os.environ = {'dummy_proxy': 'reset_env_and_do_not_raise'}
        mw = HttpProxyMiddleware()

        for url in ('http://e.com', 'https://e.com', 'file:///tmp/a'):
            req = Request(url)
            assert mw.process_request(req, spider) is None
            # assertEquals is a deprecated alias (removed in Python 3.12);
            # use assertEqual.
            self.assertEqual(req.url, url)
            self.assertEqual(req.meta, {})
开发者ID:01-,项目名称:scrapy,代码行数:11,代码来源:test_downloadermiddleware_httpproxy.py

示例6: test_enviroment_proxies

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
    def test_enviroment_proxies(self):
        """Each request must pick up the proxy that matches its URL scheme."""
        os.environ['http_proxy'] = http_proxy = 'https://proxy.for.http:3128'
        os.environ['https_proxy'] = https_proxy = 'http://proxy.for.https:8080'
        os.environ.pop('file_proxy', None)  # ensure no proxy is set for file://
        mw = HttpProxyMiddleware()

        for url, proxy in [('http://e.com', http_proxy),
                ('https://e.com', https_proxy), ('file://tmp/a', None)]:
            req = Request(url)
            assert mw.process_request(req, spider) is None
            # assertEquals is a deprecated alias (removed in Python 3.12);
            # use assertEqual.
            self.assertEqual(req.url, url)
            self.assertEqual(req.meta.get('proxy'), proxy)
开发者ID:01-,项目名称:scrapy,代码行数:14,代码来源:test_downloadermiddleware_httpproxy.py

示例7: test_no_proxy

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
    def test_no_proxy(self):
        """``no_proxy`` entries disable proxying for matching hosts,
        but an explicit ``meta['proxy']`` always wins over ``no_proxy``."""
        os.environ['http_proxy'] = 'https://proxy.for.http:3128'
        middleware = HttpProxyMiddleware()

        cases = [
            ('*', False),                      # wildcard: never proxy
            ('other.com', True),               # host not listed: proxy applied
            ('other.com,noproxy.com', False),  # host listed: no proxy
        ]
        for no_proxy_value, proxied in cases:
            os.environ['no_proxy'] = no_proxy_value
            request = Request('http://noproxy.com')
            assert middleware.process_request(request, spider) is None
            self.assertEqual('proxy' in request.meta, proxied)

        # An explicit proxy in request.meta overrides no_proxy entirely.
        os.environ['no_proxy'] = '*'
        request = Request('http://noproxy.com', meta={'proxy': 'http://proxy.com'})
        assert middleware.process_request(request, spider) is None
        self.assertEqual(request.meta, {'proxy': 'http://proxy.com'})
开发者ID:Parlin-Galanodel,项目名称:scrapy,代码行数:26,代码来源:test_downloadermiddleware_httpproxy.py

示例8: test_proxy_already_seted

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
 def test_proxy_already_seted(self):
     """A ``meta['proxy']`` explicitly set to None must be respected,
     even when an environment proxy is configured."""
     os.environ['http_proxy'] = 'https://proxy.for.http:3128'
     middleware = HttpProxyMiddleware()
     request = Request('http://noproxy.com', meta={'proxy': None})
     assert middleware.process_request(request, spider) is None
     # The None value is preserved, not replaced by the env proxy.
     assert 'proxy' in request.meta
     assert request.meta['proxy'] is None
开发者ID:01-,项目名称:scrapy,代码行数:8,代码来源:test_downloadermiddleware_httpproxy.py

示例9: test_proxy_precedence_meta

# 需要导入模块: from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware [as 别名]
# 或者: from scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware import process_request [as 别名]
 def test_proxy_precedence_meta(self):
     """A proxy given in ``request.meta`` takes precedence over the one
     configured through the environment."""
     os.environ['http_proxy'] = 'https://proxy.com'
     middleware = HttpProxyMiddleware()
     request = Request('http://scrapytest.org', meta={'proxy': 'https://new.proxy:3128'})
     assert middleware.process_request(request, spider) is None
     self.assertEqual(request.meta, {'proxy': 'https://new.proxy:3128'})
开发者ID:Parlin-Galanodel,项目名称:scrapy,代码行数:8,代码来源:test_downloadermiddleware_httpproxy.py


注:本文中的scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware.process_request方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。