本文整理汇总了Python中urllib.request.addinfourl函数的典型用法代码示例。如果您正苦于以下问题:Python addinfourl函数的具体用法?Python addinfourl怎么用?Python addinfourl使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了addinfourl函数的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: http_open
def http_open(self, req):
    """Mock HTTP handler: serve canned content from ``self.urls``.

    Returns a 200 response wrapping the stored content when the URL is
    known, otherwise an empty-bodied 404 response.
    """
    url = req.get_full_url()
    try:
        content = self.urls[url]
    except KeyError:
        # Unknown URL: empty body.  BUG FIX: the original set msg to
        # "OK" here, contradicting the 404 status code.
        resp = addinfourl(StringIO(""), None, url)
        resp.code = 404
        resp.msg = "Not Found"
    else:
        resp = addinfourl(StringIO(content), None, url)
        resp.code = 200
        resp.msg = "OK"
    return resp
示例2: http_open
def http_open(self, req):
    """Mock DJIA endpoint: answer requests from the module-level ``data`` map.

    The last three non-empty path segments form a date key; known dates
    get a 200 with the djia value, unknown ones a 404.  Requests for any
    other host are not mocked.
    """
    full_url = req.get_full_url()
    parts = urlparse(full_url)
    if self.url_target != parts.netloc:
        # Only the configured host is served by this mock.
        raise NotImplementedError
    segments = [piece for piece in parts.path.split('/') if piece]
    date = "-".join(segments[-3:])
    if date in data:
        body = str(data[date]["djia"]).encode('utf-8')
        resp = request.addinfourl(BytesIO(body), "msg", full_url)
        resp.code = 200
        resp.msg = "OK"
    else:
        body = "error\ndata not available yet".encode('utf-8')
        resp = request.addinfourl(BytesIO(body), "msg", full_url)
        resp.code = 404
        resp.msg = "Not Found"
    return resp
示例3: decode_url
def decode_url(self, url):
    """Rebuild a cached addinfourl response for *url* from files on disk."""
    target, _, headers, _ = self.translate_all(url)
    headers_fp = open(headers, 'rb')
    # The first two bytes of the header file hold the big-endian status code.
    (code,) = struct.unpack('>h', headers_fp.read(2))

    def make_headers(fp):
        # HTTPMessage parses headers on Python 2; parse_headers on Python 3.
        return HTTPMessage(fp) if PY2 else parse_headers(fp)

    return addinfourl(open(target, 'rb'), make_headers(headers_fp), url, code)
示例4: http_response
def http_response(self, req, resp):
    """Transparently unwrap gzip- and deflate-encoded response bodies.

    Responses without a recognized content-encoding pass through
    untouched; otherwise a new addinfourl carrying the original
    metadata wraps the decompressed payload.
    """
    original = resp
    # gzip: wrap the payload in a lazily-decompressing GzipFile reader.
    if resp.headers.get("content-encoding") == "gzip":
        reader = GzipFile(fileobj=StringIO(resp.read()), mode="r")
        resp = request.addinfourl(reader, original.headers, original.url, original.code)
        resp.msg = original.msg
    # deflate: inflate eagerly into an in-memory buffer.
    if resp.headers.get("content-encoding") == "deflate":
        inflated = StringIO(deflate(resp.read()))
        # 'class to add info() and geturl() methods to an open file.'
        resp = request.addinfourl(inflated, original.headers, original.url, original.code)
        resp.msg = original.msg
    return resp
示例5: https_open
def https_open(self, req):
    """Open *req* over TLS via M2Crypto, with optional proxy support.

    Chooses a proxy-aware or plain HTTPS connection depending on whether
    the request's host differs from the URL's host, caches the negotiated
    TLS session on ``self.saved_session`` for reuse, and returns an
    addinfourl wrapping the HTTP response.

    NOTE(review): relies on ``socket._fileobject``, which exists only on
    Python 2 -- confirm the target runtime before reuse.
    """
    host = req.get_host()
    if not host:
        raise M2Crypto.m2urllib2.URLError('no host given: ' + req.get_full_url())
    # Our change: Check to see if we're using a proxy.
    # Then create an appropriate ssl-aware connection.
    full_url = req.get_full_url()
    target_host = urlparse(full_url)[1]
    if (target_host != host):
        h = myProxyHTTPSConnection(host = host, appname = self.appname, ssl_context = self.ctx)
        # M2Crypto.ProxyHTTPSConnection.putrequest expects a fullurl
        selector = full_url
    else:
        h = myHTTPSConnection(host = host, appname = self.appname, ssl_context = self.ctx)
        selector = req.get_selector()
    # End our change
    h.set_debuglevel(self._debuglevel)
    # Resume a previously negotiated TLS session when one is cached.
    if self.saved_session:
        h.set_session(self.saved_session)
    headers = dict(req.headers)
    headers.update(req.unredirected_hdrs)
    # We want to make an HTTP/1.1 request, but the addinfourl
    # class isn't prepared to deal with a persistent connection.
    # It will try to read all remaining data from the socket,
    # which will block while the server waits for the next request.
    # So make sure the connection gets closed after the (only)
    # request.
    headers["Connection"] = "close"
    try:
        h.request(req.get_method(), selector, req.data, headers)
        # Cache the session that this exchange negotiated, if any.
        s = h.get_session()
        if s:
            self.saved_session = s
        r = h.getresponse()
    except socket.error as err: # XXX what error?
        err.filename = full_url
        raise M2Crypto.m2urllib2.URLError(err)
    # Pick apart the HTTPResponse object to get the addinfourl
    # object initialized properly.
    # Wrap the HTTPResponse object in socket's file object adapter
    # for Windows. That adapter calls recv(), so delegate recv()
    # to read(). This weird wrapping allows the returned object to
    # have readline() and readlines() methods.
    # XXX It might be better to extract the read buffering code
    # out of socket._fileobject() and into a base class.
    r.recv = r.read
    fp = socket._fileobject(r)
    resp = addinfourl(fp, r.msg, req.get_full_url())
    resp.code = r.status
    resp.msg = r.reason
    return resp
示例6: open
def open(self, url, conn_timeout=None):
    """Mock opener: simulate a timeout, an HTTP error, or a canned response.

    Raises URLError when conn_timeout == 0, records that an http(s) URL
    was opened via ``self.opened``, raises HTTPError when ``self.error``
    is set, and otherwise returns an addinfourl over ``self.rv`` with
    status ``self.code``.
    """
    if conn_timeout == 0:
        raise urllib_error.URLError('Could not reach %s within deadline.' % url)
    if url.startswith('http'):
        self.opened.set()
    if self.error:
        raise urllib_error.HTTPError(url, self.error, None, None, Compatibility.BytesIO(b'glhglhg'))
    # BUG FIX: addinfourl's signature is (fp, headers, url, code); the
    # original passed the url in the headers slot and None as the url,
    # so resp.geturl() returned None and resp.info() returned the url.
    return urllib_request.addinfourl(Compatibility.BytesIO(self.rv), None, url, self.code)
示例7: decode_url
def decode_url(self, url):
    """Rebuild a cached addinfourl response for *url* from files on disk."""
    target, _, headers, _ = self.translate_all(url)
    # BUG FIX: open in binary mode -- struct.unpack(">h") requires bytes,
    # and text mode returns str on Python 3 (the sibling decode_url
    # implementation already uses 'rb').
    headers_fp = open(headers, "rb")
    # The first two bytes of the header file hold the big-endian status code.
    code, = struct.unpack(">h", headers_fp.read(2))

    def make_headers(fp):
        # HTTPMessage parses headers on Python 2; parse_headers on Python 3.
        return HTTPMessage(fp) if Compatibility.PY2 else parse_headers(fp)

    # The cached body is likewise binary data.
    return addinfourl(open(target, "rb"), make_headers(headers_fp), url, code)
示例8: http_response
def http_response(self, req, response):
    """Decode compressed response bodies (bzip2, gzip, deflate).

    Returns the original response untouched when no supported
    content-encoding is present; otherwise wraps the decompressed
    payload in a new addinfourl carrying the original metadata.
    """
    decompressed = None
    if response.headers.get('content-encoding') == 'bzip2':
        import bz2
        decompressed = io.BytesIO(bz2.decompress(response.read()))
    elif response.headers.get('content-encoding') == 'gzip':
        from gzip import GzipFile
        decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
    elif response.headers.get('content-encoding') == 'deflate':
        import zlib
        # BUG FIX: read the body exactly once -- the original called
        # response.read() again in the except branch, where the stream
        # is already exhausted and yields b'', so raw-deflate bodies
        # could never be decoded.
        raw = response.read()
        try:
            decompressed = io.BytesIO(zlib.decompress(raw))
        except zlib.error:  # they ignored RFC1950
            decompressed = io.BytesIO(zlib.decompress(raw, -zlib.MAX_WBITS))
    if decompressed:
        old_response = response
        response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
        response.msg = old_response.msg
    return response
示例9: s3_open
def s3_open(self, req):
    """Open an ``s3://<bucket>/<key>`` URL and return an addinfourl.

    The implementation was inspired mainly by the code behind
    urllib.request.FileHandler.file_open().
    """
    bucket_name = req.host
    key_name = url2pathname(req.selector)[1:]
    if not bucket_name or not key_name:
        raise URLError('url must be in the format s3://<bucket>/<key>')
    # Lazily create, then cache, a single S3 connection on this handler.
    try:
        conn = self._conn
    except AttributeError:
        conn = self._conn = boto.s3.connection.S3Connection()
    bucket = conn.get_bucket(bucket_name, validate=False)
    key = bucket.get_key(key_name)
    origurl = 's3://{}/{}'.format(bucket_name, key_name)
    if key is None:
        raise URLError('no such resource: {}'.format(origurl))
    # Mirror the key's metadata as HTTP-style headers, skipping unset ones.
    candidates = [
        ('Content-type', key.content_type),
        ('Content-encoding', key.content_encoding),
        ('Content-language', key.content_language),
        ('Content-length', key.size),
        ('Etag', key.etag),
        ('Last-modified', key.last_modified),
    ]
    lines = [
        '{}: {}'.format(name, value)
        for name, value in candidates
        if value is not None
    ]
    headers = email.message_from_string('\n'.join(lines))
    return addinfourl(_FileLikeKey(key), headers, origurl)
示例10: addinfourl_wrapper
def addinfourl_wrapper(stream, headers, url, code):
    """Build an addinfourl, tolerating versions without a code argument.

    An addinfourl exposing getcode() accepts the status code in its
    constructor; older variants need the code patched on afterwards.
    """
    if not hasattr(compat_urllib_request.addinfourl, 'getcode'):
        wrapped = compat_urllib_request.addinfourl(stream, headers, url)
        wrapped.code = code
        return wrapped
    return compat_urllib_request.addinfourl(stream, headers, url, code)
示例11: http_error_303
def http_error_303(self, req, fp, code, msg, headers):
    """Return the 303 response itself instead of following the redirect.

    BUG FIX: ``addinfourl.status`` became a read-only property in
    Python 3.9, so the original ``infourl.status = code`` assignment
    raises AttributeError there.  Passing the code to the constructor
    sets ``.code`` directly, and the ``.status`` property reports it.
    """
    infourl = addinfourl(fp, headers, req.get_full_url(), code)
    return infourl
示例12: http_error_206
def http_error_206(self, req, fp, code, msg, hdrs):
    """Accept 206 Partial Content as a successful response."""
    partial = addinfourl(fp, hdrs, req.get_full_url())
    partial.code = code
    partial.msg = msg
    return partial
示例13: http_open
def http_open(self, req):
    """Stub handler: every request succeeds with the literal body 'test'."""
    body = StringIO('test')
    response = addinfourl(body, '', req.get_full_url(), 200)
    response.msg = 'OK'
    return response
示例14: https_open
def https_open(self, req):
    """Open *req* over TLS with M2Crypto's httpslib, proxy-aware.

    Request attributes are fetched via the pre-3.3 accessor methods
    first, falling back to the 3.3+ attributes, so the handler works
    across Python versions.  Returns an addinfourl wrapping the
    HTTPResponse.
    """
    # https://docs.python.org/3.3/library/urllib.request.html#urllib.request.Request.get_host
    try: # up to python-3.2
        host = req.get_host()
    except AttributeError: # from python-3.3
        host = req.host
    if not host:
        raise M2Crypto.m2urllib2.URLError('no host given')
    # Our change: Check to see if we're using a proxy.
    # Then create an appropriate ssl-aware connection.
    full_url = req.get_full_url()
    target_host = urlparse(full_url)[1]
    if target_host != host:
        # Proxy path: the proxy needs the full (defragmented) URL.
        request_uri = urldefrag(full_url)[0]
        h = httpslib.ProxyHTTPSConnection(host=host, ssl_context=self.ctx)
    else:
        try: # up to python-3.2
            request_uri = req.get_selector()
        except AttributeError: # from python-3.3
            request_uri = req.selector
        h = httpslib.HTTPSConnection(host=host, ssl_context=self.ctx)
    # End our change
    h.set_debuglevel(self._debuglevel)
    headers = dict(req.headers)
    headers.update(req.unredirected_hdrs)
    # We want to make an HTTP/1.1 request, but the addinfourl
    # class isn't prepared to deal with a persistent connection.
    # It will try to read all remaining data from the socket,
    # which will block while the server waits for the next request.
    # So make sure the connection gets closed after the (only)
    # request.
    headers["Connection"] = "close"
    try:
        h.request(req.get_method(), request_uri, req.data, headers)
        r = h.getresponse()
    except socket.error as err: # XXX what error?
        raise M2Crypto.m2urllib2.URLError(err)
    # Pick apart the HTTPResponse object to get the addinfourl
    # object initialized properly.
    # Wrap the HTTPResponse object in socket's file object adapter
    # for Windows. That adapter calls recv(), so delegate recv()
    # to read(). This weird wrapping allows the returned object to
    # have readline() and readlines() methods.
    r.recv = r.read
    if (sys.version_info < (3, 0)):
        fp = socket._fileobject(r, close=True)
    else:
        # Python 3: stub the socket-only hooks that socket.SocketIO
        # expects, since r is an HTTPResponse rather than a socket.
        r._decref_socketios = lambda: None
        r.ssl = h.sock.ssl
        r._timeout = -1.0
        # hack to bypass python3 bug with 0 buffer size and
        # http/client.py readinto method for response class
        if r.length is not None and r.length == 0:
            r.readinto = lambda b: 0
            r.recv_into = r.readinto
        fp = socket.SocketIO(r, 'rb')
    resp = addinfourl(fp, r.msg, req.get_full_url())
    resp.code = r.status
    resp.msg = r.reason
    return resp