本文整理汇总了Python中requests.Response.close方法的典型用法代码示例。如果您正苦于以下问题:Python Response.close方法的具体用法?Python Response.close怎么用?Python Response.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类requests.Response的用法示例。
在下文中一共展示了Response.close方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: handle_401
# 需要导入模块: from requests import Response [as 别名]
# 或者: from requests.Response import close [as 别名]
def handle_401(self, r: R, **kwargs):
    """Takes the given response and re-tries auth with a new nonce."""
    # Retry only on 401 and at most twice (guarded by the call counter,
    # which callers presumably reset between logical requests -- TODO confirm).
    if r.status_code == 401 and self.num_401_calls < 2:
        self.num_401_calls += 1
        # Renew nonce; taking max() with last_nonce + 1 keeps it strictly
        # monotonically increasing even if the clock-based value repeats.
        nonce = self.new_nonce(max(self._nonce(), self.last_nonce + 1))
        # Consume content and release the original connection
        # to allow our new request to reuse the same one.
        r.content
        r.close()
        prep = r.request.copy()
        # Carry over any cookies the server set on the 401 response into
        # the copied request before re-sending.
        cookies = prep._cookies
        requests.auth.extract_cookies_to_jar(cookies, r.request, r.raw)
        prep.prepare_cookies(cookies)
        # Re-sign the prepared request using the fresh nonce.
        self.auth_request(prep, nonce)
        # Re-send on the same connection; keep the 401 in the history so
        # callers can inspect the full exchange.
        _r = r.connection.send(prep, **kwargs)
        _r.history.append(r)
        _r.request = prep
        return _r
    return r
示例2: send
# 需要导入模块: from requests import Response [as 别名]
# 或者: from requests.Response import close [as 别名]
def send(self, request, stream=None, timeout=None, verify=None, cert=None, proxies=None):
    """Serve a local file as an HTTP-style response.

    Maps ``request.url`` to a filesystem path. On success, returns a 200
    ``Response`` whose ``raw`` attribute is the opened file; when the path
    cannot be stat'ed, returns a 404 with the ``OSError`` stashed in ``raw``.
    """
    local_path = url_to_path(request.url)
    response = Response()
    response.status_code = 200
    response.url = request.url
    try:
        file_info = stat(local_path)
    except OSError as exc:
        # Missing or unreadable file: report 404 and keep the error as the body.
        response.status_code = 404
        response.raw = exc
        return response
    # Synthesize the usual HTTP metadata from filesystem attributes.
    response.headers = CaseInsensitiveDict({
        "Content-Type": guess_type(local_path)[0] or "text/plain",
        "Content-Length": file_info.st_size,
        "Last-Modified": formatdate(file_info.st_mtime, usegmt=True),
    })
    response.raw = open(local_path, "rb")
    # Closing the response must also close the underlying file handle.
    response.close = response.raw.close
    return response
示例3: send
# 需要导入模块: from requests import Response [as 别名]
# 或者: from requests.Response import close [as 别名]
def send(self, request, stream=None, timeout=None, verify=None, cert=None, proxies=None):
    """Fetch an S3 object and wrap it in a requests ``Response``.

    Returns a 404 response when boto is unavailable, the bucket/key cannot
    be accessed, or the key does not exist. Otherwise downloads the object
    into a temp file and exposes the open file as the response body.
    """
    resp = Response()
    resp.status_code = 200
    resp.url = request.url
    try:
        import boto
    except ImportError:
        stderrlog.info('\nError: boto is required for S3 channels. '
                       'Please install it with `conda install boto`\n'
                       'Make sure to run `source deactivate` if you '
                       'are in a conda environment.\n')
        resp.status_code = 404
        return resp
    conn = boto.connect_s3()
    bucket_name, key_string = url_to_s3_info(request.url)
    # Get the bucket without validation that it exists and that we have
    # permissions to list its contents.
    bucket = conn.get_bucket(bucket_name, validate=False)
    try:
        key = bucket.get_key(key_string)
    except boto.exception.S3ResponseError as exc:
        # This exception will occur if the bucket does not exist or if the
        # user does not have permission to list its contents.
        resp.status_code = 404
        resp.raw = exc
        return resp
    if key and key.exists:
        modified = key.last_modified
        content_type = key.content_type or "text/plain"
        resp.headers = CaseInsensitiveDict({
            "Content-Type": content_type,
            "Content-Length": key.size,
            "Last-Modified": modified,
        })
        import os
        # BUGFIX: mkstemp() returns an *open* OS-level file descriptor; the
        # original code discarded it, leaking one descriptor per request.
        # Close it immediately -- the file is written to and re-opened by
        # name below, so the descriptor itself is not needed.
        fd, self._temp_file = mkstemp()
        os.close(fd)
        key.get_contents_to_filename(self._temp_file)
        resp.raw = open(self._temp_file, 'rb')
        # Closing the response must also close the temp-file handle.
        resp.close = resp.raw.close
    else:
        resp.status_code = 404
    return resp
示例4: __init__
# 需要导入模块: from requests import Response [as 别名]
# 或者: from requests.Response import close [as 别名]
def __init__(self,
             requests_response: requests.Response,
             max_size: Optional[int],
             error_is_client_side: bool = False):
    """Constructor.

    Eagerly reads the body of *requests_response* (bounded by *max_size*)
    so the server cannot time out between construction and a later body
    access; on read failure, substitutes a synthetic 408 response that
    carries the error text as its body.
    """
    try:
        # Read the raw data right away without waiting for a call to
        # raw_data() to make sure that the server doesn't time out while
        # returning stuff.
        self.__response_data = self.__read_response_data(requests_response=requests_response, max_size=max_size)
        # Release the response to return connection back to the pool
        # (http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow)
        requests_response.close()
    except Exception as ex:
        log.warning("Error reading data for URL {}: {}".format(requests_response.url, str(ex)))
        # Build a stand-in 408 response mirroring the failed request.
        fallback = requests.Response()
        fallback.status_code = HTTPStatus.REQUEST_TIMEOUT.value
        fallback.reason = HTTPStatus.REQUEST_TIMEOUT.phrase
        fallback.request = requests_response.request
        fallback.history = []
        requests_response = fallback
        # We treat timeouts as client-side errors too because we can retry
        # on them.
        error_is_client_side = True
        self.__response_data = str(ex).encode('utf-8')
    self.__requests_response = requests_response
    self.__error_is_client_side = error_is_client_side
    self.__previous_response = None
    self.__request = None
示例5: send
# 需要导入模块: from requests import Response [as 别名]
# 或者: from requests.Response import close [as 别名]
def send(self, request, stream=False, timeout=None, **kwargs):
    """Fetch a file over FTP and adapt it to a requests ``Response``.

    Only ``stream=True`` is supported. The download runs in a background
    thread that feeds a bounded queue; the response's ``raw.stream``
    generator drains that queue in ``amt``-sized chunks.

    Raises:
        requests.exceptions.ConnectionError: if the FTP server refuses
            the connection.
        NotImplementedError: for non-streaming requests.
    """
    parsed_url = urllib.parse.urlparse(request.url)
    file_path = parsed_url.path
    # Strip the leading slash, if present.
    if file_path.startswith('/'):
        file_path = file_path[1:]
    try:
        self.conn = self.get_connection(parsed_url.netloc, timeout)
    except ConnectionRefusedError as exc:
        # Wrap this in a requests exception.
        # in requests 2.2.1, ConnectionError does not take keyword args
        raise requests.exceptions.ConnectionError() from exc
    resp = Response()
    resp.url = request.url
    try:
        size = self.conn.size(file_path)
    except error_perm:
        # SIZE failed (file missing or command unsupported): act like a 404.
        resp.status_code = 404
        return resp
    if stream:
        # We have to do this in a background thread, since ftplib's and
        # requests' approaches are the opposite: ftplib is callback based,
        # and requests needs to expose an iterable. (Push vs pull)
        # When the queue size is reached, puts will block. This provides
        # some backpressure.
        queue = Queue(maxsize=100)
        done_sentinel = object()

        def handle_transfer():
            # Download all the chunks into a queue, then place a sentinel
            # object into it to signal completion.
            self.conn.retrbinary('RETR ' + file_path, queue.put)
            queue.put(done_sentinel)

        Thread(target=handle_transfer).start()

        # BUGFIX: renamed from `stream` -- the original inner function
        # shadowed the `stream` parameter of send().
        def stream_chunks(amt=8192, decode_content=False):
            """A generator, yielding chunks from the queue."""
            # We maintain a buffer so the consumer gets exactly the number
            # of bytes requested.
            buffer = bytearray()
            while True:
                data = queue.get()
                if data is not done_sentinel:
                    buffer.extend(data)
                    if len(buffer) >= amt:
                        result = buffer[:amt]
                        buffer = buffer[amt:]
                        yield result
                else:
                    # BUGFIX: drain the remainder in amt-sized pieces; the
                    # original yielded whatever was left in one go, which
                    # could exceed the requested chunk size.
                    while buffer:
                        yield buffer[:amt]
                        buffer = buffer[amt:]
                    return

        Raw = namedtuple('raw', 'stream')
        raw = Raw(stream_chunks)
        resp.status_code = 200
        resp.raw = raw
        resp.headers['content-length'] = size
        # Closing the response must also close the FTP connection.
        resp.close = lambda: self.conn.close()
        return resp
    else:
        # Not relevant for Ubuntu Make.
        raise NotImplementedError