This article collects typical usage examples of Python's urllib.request.build_opener method. If you are wondering what request.build_opener does, how to call it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples for the module it belongs to, urllib.request.
Fifteen code examples of request.build_opener are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
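Before the examples, here is a minimal sketch of the pattern they share: build_opener chains one or more handler objects into an opener whose open() method issues requests. The URL and header values below are placeholders rather than values taken from any example.

# Minimal sketch: an opener that keeps cookies between requests.
from http.cookiejar import CookieJar
from urllib.request import HTTPCookieProcessor, Request, build_opener

cookie_jar = CookieJar()
opener = build_opener(HTTPCookieProcessor(cookie_jar))  # handlers are installed in order
req = Request('https://example.com', headers={'User-Agent': 'Mozilla/5.0'})
with opener.open(req, timeout=10) as response:
    body = response.read()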
Example 1: __init__
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def __init__(
        self,
        host,
        port=8069,
        timeout=120,
        version=None,
        deserialize=True,
        opener=None,
):
    super(ConnectorJSONRPC, self).__init__(host, port, timeout, version)
    self.deserialize = deserialize
    # One URL opener (with cookies handling) shared between
    # JSON and HTTP requests
    if opener is None:
        cookie_jar = CookieJar()
        opener = build_opener(HTTPCookieProcessor(cookie_jar))
    self._opener = opener
    self._proxy_json, self._proxy_http = self._get_proxies()
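A hypothetical instantiation of the class this method belongs to; the host is a placeholder and the keyword values simply restate the defaults shown in the signature above.

# Hypothetical usage; 'localhost' and port 8069 are illustrative only.
connector = ConnectorJSONRPC('localhost', port=8069, timeout=120)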
Example 2: get_access_token
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def get_access_token(self, code, state=None):
    '''
    In callback url: http://host/callback?code=123&state=xyz
    use code and state to get an access token.
    '''
    kw = dict(client_id=self._client_id, client_secret=self._client_secret, code=code)
    if self._redirect_uri:
        kw['redirect_uri'] = self._redirect_uri
    if state:
        kw['state'] = state
    opener = build_opener(HTTPSHandler)
    request = Request('https://github.com/login/oauth/access_token', data=_encode_params(kw))
    request.get_method = _METHOD_MAP['POST']
    request.add_header('Accept', 'application/json')
    try:
        response = opener.open(request, timeout=TIMEOUT)
        r = _parse_json(response.read())
        if 'error' in r:
            raise ApiAuthError(str(r.error))
        return str(r.access_token)
    except HTTPError as e:
        raise ApiAuthError('HTTPError when get access token')
Example 3: do_socks
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def do_socks(self, line):
    headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
    url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
    try:
        proxy_handler = ProxyHandler({})
        opener = build_opener(proxy_handler)
        response = Request(url)
        r = opener.open(response)
        result = r.read()
        items = json.loads(result)
    except Exception as e:
        logging.error("ERROR: %s" % str(e))
    else:
        if len(items) > 0:
            self.printTable(items, header=headers)
        else:
            logging.info('No Relays Available!')
Example 4: get_response
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def get_response(url, faker=False):
    logging.debug('get_response: %s' % url)
    # install cookies
    if cookies:
        opener = request.build_opener(request.HTTPCookieProcessor(cookies))
        request.install_opener(opener)
    if faker:
        response = request.urlopen(request.Request(url, headers=fake_headers), None)
    else:
        response = request.urlopen(url)
    data = response.read()
    if response.info().get('Content-Encoding') == 'gzip':
        data = ungzip(data)
    elif response.info().get('Content-Encoding') == 'deflate':
        data = undeflate(data)
    response.data = data
    return response

# DEPRECATED in favor of get_content()
Example 5: scrape
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def scrape(category_name, commodity_name):
    # An empty ProxyHandler bypasses the university network proxy,
    # which Python cannot authenticate against directly.
    proxy_handler = u.ProxyHandler({})
    opener = u.build_opener(proxy_handler)
    # CME officially discourages scraping, so a User-Agent header is set
    # to make the request look like a regular browser; in practice the
    # site tolerates light scraping.
    # The URL is formatted per category/commodity so different
    # commodities can be fetched with the same function.
    req = u.Request('http://www.cmegroup.com/trading/metals/%s/%s.html' % (
        category_name, commodity_name), headers={'User-Agent': 'Mozilla/5.0'})
    response = opener.open(req)
    result = response.read()
    soup = bs(result, 'html.parser')
    return soup
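A hypothetical call of scrape(); the category and commodity path segments are illustrative assumptions and have not been verified against the CME site.

# Illustrative only; the path segments are assumed, not confirmed by the example.
soup = scrape('precious', 'gold')
print(soup.title)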
Example 6: __init__
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def __init__(self):
    self.articles = []
    self.query = None
    self.cjar = MozillaCookieJar()
    # If we have a cookie file, load it:
    if ScholarConf.COOKIE_JAR_FILE and \
       os.path.exists(ScholarConf.COOKIE_JAR_FILE):
        try:
            self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
                           ignore_discard=True)
            ScholarUtils.log('info', 'loaded cookies file')
        except Exception as msg:
            ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
            self.cjar = MozillaCookieJar()  # Just to be safe
    self.opener = build_opener(HTTPCookieProcessor(self.cjar))
    self.settings = None  # Last settings object, if any
Example 7: send_response
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def send_response(event, context, response_status, response_data):
    '''Send a resource manipulation status response to CloudFormation'''
    response_body = json.dumps({
        "Status": response_status,
        "Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event['StackId'],
        "RequestId": event['RequestId'],
        "LogicalResourceId": event['LogicalResourceId'],
        "Data": response_data
    })
    logger.info('ResponseURL: %s', event['ResponseURL'])
    logger.info('ResponseBody: %s', response_body)
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body))
    request.get_method = lambda: 'PUT'
    response = opener.open(request)
    logger.info("Status code: %s", response.getcode())
    logger.info("Status message: %s", response.msg)
Example 8: send_response
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def send_response(event, context, response_status, response_data):
    """
    Send a resource manipulation status response to CloudFormation
    """
    response_body = json.dumps({
        "Status": response_status,
        "Reason": "See the details in CloudWatch Log Stream: " + context.log_stream_name,
        "PhysicalResourceId": context.log_stream_name,
        "StackId": event['StackId'],
        "RequestId": event['RequestId'],
        "LogicalResourceId": event['LogicalResourceId'],
        "Data": response_data
    })
    LOGGER.info('ResponseURL: {s}'.format(s=event['ResponseURL']))
    LOGGER.info('ResponseBody: {s}'.format(s=response_body))
    opener = build_opener(HTTPHandler)
    request = Request(event['ResponseURL'], data=response_body.encode('utf-8'))
    request.add_header('Content-Type', '')
    request.add_header('Content-Length', len(response_body))
    request.get_method = lambda: 'PUT'
    response = opener.open(request)
    LOGGER.info("Status code: {s}".format(s=response.getcode()))  # getcode() must be called
    LOGGER.info("Status message: {s}".format(s=response.msg))
Example 9: load_html
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def load_html(url, with_cookies=False, headers={}):
    """Attempts to load an HTML page, returning a BeautifulSoup instance. Raises
    any networking or parsing exceptions"""
    if with_cookies:
        cj = CookieJar()
        opener = urlopen.build_opener(urlopen.HTTPCookieProcessor(cj))
    else:
        opener = urlopen.build_opener()
    request = urlopen.Request(url, headers=headers)
    response = opener.open(request)
    html = response.read().decode('utf-8', errors='replace')
    soup = BeautifulSoup(html, 'html.parser')
    return soup
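A hypothetical call of load_html() with a placeholder URL; with_cookies=True takes the cookie-aware opener branch shown above.

# Placeholder URL and headers, for illustration only.
soup = load_html('https://example.com', with_cookies=True,
                 headers={'User-Agent': 'Mozilla/5.0'})
print(soup.title)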
Example 10: check_php_multipartform_dos
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def check_php_multipartform_dos(url, post_body, headers, ip):
    # Note: this example uses the Python 2 urllib2 module; urllib2.build_opener
    # corresponds to urllib.request.build_opener in Python 3.
    try:
        proxy_handler = urllib2.ProxyHandler({"http": ip})
        null_proxy_handler = urllib2.ProxyHandler({})
        opener = urllib2.build_opener(proxy_handler)
        urllib2.install_opener(opener)
        req = urllib2.Request(url)
        for key in headers.keys():
            req.add_header(key, headers[key])
        starttime = datetime.datetime.now()
        fd = urllib2.urlopen(req, post_body)
        html = fd.read()
        endtime = datetime.datetime.now()
        usetime = (endtime - starttime).seconds
        if usetime > 5:
            result = url + " is vulnerable"
        elif usetime > 3:
            result = "need to check normal respond time"
        else:
            result = ""  # fast response, not flagged (the original left result unset here)
        return [result, usetime]
    except KeyboardInterrupt:
        exit()

# end
Example 11: delete
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def delete(uri, params={}, headers={}):
    data = None  # DELETE never sends a body here
    if params:
        uri = "%s?%s" % (uri, urlencode(params))
    url_opener = build_opener(HTTPHandler)
    req = Request(uri, data)
    req.get_method = lambda: 'DELETE'
    for (k, v) in headers.items():
        req.add_header(k, v)
    request = url_opener.open(req)
    response = request.read()
    # code = request.code
    return response
Example 12: post
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def post(uri, params={}, headers={}):
    data = None  # no body unless params are given
    if params:
        # TODO: sure it's json and not urlencode?
        # data = urlencode(params)
        data = unicode_to_bytes(jsoner.dumps(params))
    url_opener = build_opener(HTTPHandler)
    req = Request(uri, data)
    req.get_method = lambda: 'POST'
    for (k, v) in headers.items():
        req.add_header(k, v)
    request = url_opener.open(req)
    response = request.read()
    # code = request.code
    return response
Example 13: put
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def put(uri, data=None, params={}, headers=None):
    # data = None  # always none in GET
    if headers is None:
        headers = {}
    if params:
        # TODO: sure it's json and not urlencode?
        # data = urlencode(params)
        uri = "%s?%s" % (uri, urlencode(params))
    headers['Content-Type'] = 'your/contenttype'
    url_opener = build_opener(HTTPHandler)
    req = Request(uri, data)
    req.get_method = lambda: 'PUT'
    for (k, v) in headers.items():
        req.add_header(k, v)
    request = url_opener.open(req)
    response = request.read()
    # code = request.code
    return response
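A hedged sketch of calling the delete/post/put helpers from Examples 11-13; the endpoint and payload are placeholders, and the snippet assumes the helpers' module-level imports (urlencode, HTTPHandler, Request, the JSON serializer) are available.

# Placeholder endpoint and payload, for illustration only.
created = post('http://localhost:8080/api/items',
               params={'name': 'example'},
               headers={'Accept': 'application/json'})
put('http://localhost:8080/api/items/1', data=b'{"name": "example"}')
delete('http://localhost:8080/api/items/1')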
Example 14: download
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def download(self, url, retry_count=3, headers=None, proxy=None, data=None):
    if url is None:
        return None
    try:
        req = request.Request(url, headers=headers or {}, data=data)  # guard against headers=None
        cookie = cookiejar.CookieJar()
        cookie_process = request.HTTPCookieProcessor(cookie)
        opener = request.build_opener(cookie_process)  # attach the cookie handler to the opener
        if proxy:
            proxies = {urlparse(url).scheme: proxy}
            opener.add_handler(request.ProxyHandler(proxies))
        content = opener.open(req).read()
    except error.URLError as e:
        print('HtmlDownLoader download error:', e.reason)
        content = None
        if retry_count > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # An HTTPError with a 5xx status code means a server-side error,
                # so it is worth retrying the download.
                return self.download(url, retry_count - 1, headers, proxy, data)
    return content
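A hypothetical call of the download() method; the class name is inferred only from the error message printed in the except branch, and the URL is a placeholder.

# 'HtmlDownLoader' is assumed from the log message; adjust to the real class name.
downloader = HtmlDownLoader()
content = downloader.download('https://example.com',
                              headers={'User-Agent': 'Mozilla/5.0'})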
Example 15: default_handler
# Required import: from urllib import request [as alias]
# Or: from urllib.request import build_opener [as alias]
def default_handler(url, method, timeout, headers, data):
    """Default handler that implements HTTP/HTTPS connections.

    Used by the push_to_gateway functions. Can be re-used by other handlers."""
    def handle():
        request = Request(url, data=data)
        request.get_method = lambda: method
        for k, v in headers:
            request.add_header(k, v)
        resp = build_opener(HTTPHandler).open(request, timeout=timeout)
        if resp.code >= 400:
            raise IOError("error talking to pushgateway: {0} {1}".format(
                resp.code, resp.msg))
    return handle
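A hedged sketch of invoking the handler factory above; the Pushgateway URL, job name, and metric body are placeholders.

# Placeholders throughout; handler() performs the request and raises IOError on status >= 400.
handler = default_handler(
    url='http://localhost:9091/metrics/job/example_job',
    method='PUT',
    timeout=5,
    headers=[('Content-Type', 'text/plain')],
    data=b'example_metric 1\n',
)
handler()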