This page collects typical usage examples of aiohttp.client in Python: what aiohttp.client is for, how it is called, and what idiomatic usage looks like. The curated examples below may help; you can also explore further usage examples of the containing package, aiohttp.
The following 11 code examples of aiohttp.client are shown below, ordered by popularity by default.
Example 1: close

# Required import: import aiohttp
# Or: from aiohttp import client
def close(self):
    """Close all fixtures created by the test client.

    After that point, the TestClient is no longer usable.

    This is an idempotent function: running close multiple times
    will not have any additional effects.

    close is also run when the object is garbage collected, and on
    exit when used as a context manager.
    """
    if self.started and not self.closed:
        self.server.close()
        yield from self.server.wait_closed()
        self._root = None
        self.port = None
        yield from self._close_hook()
        self._closed = True
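
Since close() is idempotent and also runs on context-manager exit, the usual pattern is to let async with do the cleanup. A minimal sketch, assuming modern aiohttp where close() is a coroutine; the trivial app and handler are illustrative, not from the original snippet:

import asyncio
from aiohttp import web
from aiohttp.test_utils import TestClient, TestServer

async def handler(request):
    return web.Response(text="ok")

async def main():
    app = web.Application()
    app.router.add_get("/", handler)
    async with TestClient(TestServer(app)) as client:  # starts the server
        resp = await client.get("/")
        assert resp.status == 200
    await client.close()  # idempotent: the context manager already closed it

asyncio.run(main())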
Example 2: _send

# Required import: import aiohttp
# Or: from aiohttp import client
async def _send(self, request):
    # Note: when using aiobotocore with DynamoDB, requests fail on crc32
    # checksum computation as soon as the response data reaches ~5KB.
    # When the AWS response is gzip compressed:
    # 1. aiohttp automatically decompresses the data
    #    (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
    # 2. botocore computes crc32 on the uncompressed bytes and fails,
    #    because the crc32 was computed on the compressed data.
    # The following line forces AWS not to use gzip compression; if there
    # were a way to configure aiohttp not to perform decompression, we could
    # remove it and take advantage of AWS gzip compression.
    # https://github.com/boto/botocore/issues/1255
    url = request.url
    headers = request.headers
    data = request.body
    headers['Accept-Encoding'] = 'identity'
    headers_ = MultiDict(
        (z[0], _text(z[1], encoding='utf-8')) for z in headers.items())
    # botocore does this during the request, so we do it here as well.
    # TODO: this should be part of the ClientSession, perhaps via a wrapper
    proxy = self.proxies.get(urlparse(url.lower()).scheme)
    if isinstance(data, io.IOBase):
        data = _IOBaseWrapper(data)
    url = URL(url, encoded=True)
    resp = await self.http_session.request(
        request.method, url=url, headers=headers_, data=data, proxy=proxy)
    # If we're not streaming, read the content so we can retry any timeout
    # errors, see:
    # https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604
    if not request.stream_output:
        await resp.read()
    return resp
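
The decompression behaviour described in the comment can now be switched off in aiohttp itself via ClientSession(auto_decompress=False), which is the knob the comment wishes for: the raw wire bytes reach the caller, so a crc32 over them would match. A hedged sketch of that alternative (the URL is illustrative):

import asyncio
import aiohttp

async def fetch_raw(url):
    # auto_decompress=False hands back the body exactly as sent, so a gzip
    # response stays compressed and a crc32 over it matches the wire bytes.
    async with aiohttp.ClientSession(auto_decompress=False) as session:
        async with session.get(url) as resp:
            return await resp.read()

asyncio.run(fetch_raw("https://example.com/"))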
Example 3: __init__

# Required import: import aiohttp
# Or: from aiohttp import client
def __init__(self, url, client=None, headers=None, encoding=None, **kwargs):
    self.headers = MultiDict(headers or {})
    self.headers.setdefault('Content-Type', 'text/xml')
    self.headers.setdefault('User-Agent', self.USER_AGENT)
    self.encoding = encoding
    self.url = str(url)
    # Reuse a caller-supplied session, or create a private one.
    self.client = client or aiohttp.client.ClientSession(**kwargs)
Example 4: __remote_call

# Required import: import aiohttp
# Or: from aiohttp import client
async def __remote_call(self, method_name, *args, **kwargs):
    async with self.client.post(
        str(self.url),
        data=etree.tostring(
            self._make_request(method_name, *args, **kwargs),
            xml_declaration=True,
            encoding=self.encoding
        ),
        headers=self.headers,
    ) as response:
        response.raise_for_status()
        return self._parse_response((await response.read()), method_name)
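
Putting Examples 3 and 4 together, a usage sketch for such an XML-RPC client. The class name ServerProxy, the endpoint URL, and the remote method name are assumptions for illustration; attribute access is assumed to dispatch through __remote_call, as in aiohttp-xmlrpc:

import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        # Passing an existing session lets several proxies share one pool;
        # omitting it makes the constructor above create a private one.
        proxy = ServerProxy("http://127.0.0.1:8080/rpc", client=session)
        result = await proxy.sum(2, 3)  # hypothetical remote method
        print(result)

asyncio.run(main())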
Example 5: close

# Required import: import aiohttp
# Or: from aiohttp import client
def close(self):
    return self.client.close()
Example 6: wait_for_rest_layer

# Required import: import aiohttp
# Or: from aiohttp import client
def wait_for_rest_layer(es, max_attempts=40):
    """
    Waits for ``max_attempts`` until Elasticsearch's REST API is available.

    :param es: Elasticsearch client to use for connecting.
    :param max_attempts: The maximum number of attempts to check whether the REST API is available.
    :return: True iff Elasticsearch's REST API is available.
    """
    # Assume that at least the hosts that we expect to contact should be available. Note that this is not 100%
    # bullet-proof, as a cluster could have e.g. dedicated masters which are not contained in our list of target
    # hosts, but this is still better than just checking that any random node's REST API is reachable.
    expected_node_count = len(es.transport.hosts)
    logger = logging.getLogger(__name__)
    for attempt in range(max_attempts):
        logger.debug("Checking REST API availability (attempt [%s]).", attempt)
        import elasticsearch
        try:
            # See also WaitForHttpResource in the Elasticsearch tests. Contrary to the ES tests, we consider the API
            # available even when the cluster status is RED (as long as all required nodes are present).
            es.cluster.health(wait_for_nodes=">={}".format(expected_node_count))
            logger.info("REST API is available for >= [%s] nodes after [%s] attempts.", expected_node_count, attempt)
            return True
        except elasticsearch.ConnectionError as e:
            if "SSL: UNKNOWN_PROTOCOL" in str(e):
                raise exceptions.SystemSetupError("Could not connect to cluster via https. Is this an https endpoint?", e)
            else:
                logger.debug("Got connection error on attempt [%s]. Sleeping...", attempt)
                time.sleep(3)
        except elasticsearch.TransportError as e:
            # Cluster block or x-pack not initialized yet: our wait condition is not reached.
            if e.status_code in (503, 401, 408):
                logger.debug("Got status code [%s] on attempt [%s]. Sleeping...", e.status_code, attempt)
                time.sleep(3)
            else:
                logger.warning("Got unexpected status code [%s] on attempt [%s].", e.status_code, attempt)
                raise e
    return False
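
A hypothetical invocation sketch; the host list, attempt count, and error handling below are illustrative, not from the original snippet:

from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["http://127.0.0.1:9200"])
if not wait_for_rest_layer(es, max_attempts=20):
    raise RuntimeError("Elasticsearch REST API did not become available in time")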
Example 7: session

# Required import: import aiohttp
# Or: from aiohttp import client
@property
def session(self):
    """An internal aiohttp.ClientSession.

    Unlike the methods on the TestClient, client session requests
    do not automatically include the host in the url queried, and
    will require an absolute path to the resource.
    """
    return self._session
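
The distinction the docstring draws can be made concrete. In the sketch below, client is assumed to be a started TestClient and the path is illustrative:

async def compare(client):
    # TestClient methods accept a relative path and fill in the host...
    short = await client.get("/ping")
    # ...while the raw session requires an absolute URL.
    full = await client.session.get(str(client.make_url("/ping")))
    return short.status, full.status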
Example 8: setUp

# Required import: import aiohttp
# Or: from aiohttp import client
def setUp(self):
    self.loop = setup_test_loop()
    self.app = self.loop.run_until_complete(self.get_application())
    self.client = self.loop.run_until_complete(self._get_client(self.app))
    self.loop.run_until_complete(self.client.start_server())
Example 9: tearDown

# Required import: import aiohttp
# Or: from aiohttp import client
def tearDown(self):
    self.loop.run_until_complete(self.client.close())
    teardown_test_loop(self.loop)
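
Examples 8 and 9 are the internals of aiohttp's AioHTTPTestCase; in user code the same lifecycle is normally obtained by subclassing it rather than writing setUp/tearDown by hand. A minimal sketch (the ping app is illustrative):

from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop

class PingTestCase(AioHTTPTestCase):
    async def get_application(self):
        async def ping(request):
            return web.Response(text="pong")
        app = web.Application()
        app.router.add_get("/ping", ping)
        return app

    @unittest_run_loop
    async def test_ping(self):
        resp = await self.client.get("/ping")  # setUp already started the server
        assert resp.status == 200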
Example 10: _request

# Required import: import aiohttp
# Or: from aiohttp import client
def _request(self, method, url, headers, data):
    # Note: when using aiobotocore with DynamoDB, requests fail on crc32
    # checksum computation as soon as the response data reaches ~5KB.
    # When the AWS response is gzip compressed:
    # 1. aiohttp automatically decompresses the data
    #    (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content)
    # 2. botocore computes crc32 on the uncompressed bytes and fails,
    #    because the crc32 was computed on the compressed data.
    # The following line forces AWS not to use gzip compression; if there
    # were a way to configure aiohttp not to perform decompression, we could
    # remove it and take advantage of AWS gzip compression.
    # See: https://github.com/aio-libs/aiohttp/issues/1992
    headers['Accept-Encoding'] = 'identity'
    headers_ = MultiDict(
        (z[0], text_(z[1], encoding='utf-8')) for z in headers.items())
    # botocore does this during the request, so we do it here as well.
    proxy = self.proxies.get(urlparse(url.lower()).scheme)
    if AIOHTTP_2 and isinstance(data, io.IOBase):
        data = _IOBaseWrapper(data)
    url = URL(url, encoded=True)
    # See https://github.com/aio-libs/aiobotocore/issues/267 for details
    for i in range(MAX_REDIRECTS):
        resp = yield from self._aio_session.request(method, url=url,
                                                    headers=headers_,
                                                    data=data,
                                                    proxy=proxy,
                                                    timeout=None,
                                                    allow_redirects=False)
        if resp.status in {301, 302, 303, 307}:
            redir_arr = _aiohttp_do_redirect(self._aio_session, method,
                                             url, headers, data, resp)
            if redir_arr is None:
                break
            method, url, headers, params, data = redir_arr
        else:
            break
    return resp
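
The manual redirect loop above exists so that headers and body can be reworked (for example re-signed) between hops. A standalone sketch of the same idea with plain aiohttp; MAX_REDIRECTS and the URL are illustrative and only the happy path is handled:

import asyncio
import aiohttp
from yarl import URL

MAX_REDIRECTS = 10

async def fetch_following(url):
    async with aiohttp.ClientSession() as session:
        for _ in range(MAX_REDIRECTS):
            async with session.get(url, allow_redirects=False) as resp:
                if resp.status in {301, 302, 303, 307} and "Location" in resp.headers:
                    # Resolve relative Location values against the current URL.
                    url = str(resp.url.join(URL(resp.headers["Location"])))
                    continue
                return await resp.read()
    raise RuntimeError("too many redirects")

asyncio.run(fetch_following("https://example.com/"))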
Example 11: __init__

# Required import: import aiohttp
# Or: from aiohttp import client
def __init__(self, host,
             endpoint_prefix, event_emitter, proxies=None, verify=True,
             timeout=DEFAULT_TIMEOUT, response_parser_factory=None,
             max_pool_connections=MAX_POOL_CONNECTIONS,
             loop=None, connector_args=None):
    super().__init__(host, endpoint_prefix,
                     event_emitter, proxies=proxies, verify=verify,
                     timeout=timeout,
                     response_parser_factory=response_parser_factory,
                     max_pool_connections=max_pool_connections)
    if isinstance(timeout, (list, tuple)):
        self._conn_timeout, self._read_timeout = timeout
    else:
        self._conn_timeout = self._read_timeout = timeout
    self._loop = loop or asyncio.get_event_loop()
    if connector_args is None:
        # AWS has a 20-second idle timeout:
        # https://forums.aws.amazon.com/message.jspa?messageID=215367
        # aiohttp's default is 30s, so set something reasonable here.
        connector_args = dict(keepalive_timeout=12)
    connector = aiohttp.TCPConnector(loop=self._loop,
                                     limit=max_pool_connections,
                                     verify_ssl=self.verify,
                                     **connector_args)
    # This begins our replacement of aiohttp's `read_timeout`. aiohttp's
    # implementation measures an absolute time from the initial request to
    # the last read, so if the client delays reading the body for long
    # enough, the request is cancelled.
    # See https://github.com/aio-libs/aiobotocore/issues/245
    assert connector._factory.func == ResponseHandler
    connector._factory = functools.partial(
        WrappedResponseHandler,
        wrapped_read_timeout=self._read_timeout,
        *connector._factory.args,
        **connector._factory.keywords)
    self._aio_session = aiohttp.ClientSession(
        connector=connector,
        read_timeout=None,
        conn_timeout=self._conn_timeout,
        skip_auto_headers={'CONTENT-TYPE'},
        response_class=ClientResponseProxy,
        loop=self._loop)
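
The keepalive choice in this constructor can be reproduced with plain aiohttp. A minimal sketch, where the limit and timeout values follow the snippet and the URL is illustrative:

import asyncio
import aiohttp

async def main():
    # Keep idle connections well under AWS's ~20s server-side idle timeout.
    connector = aiohttp.TCPConnector(limit=10, keepalive_timeout=12)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://example.com/") as resp:
            print(resp.status, len(await resp.read()))

asyncio.run(main())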