This article collects typical usage examples of the Python method pyelasticsearch.downtime.DowntimePronePool.mark_dead. If you have been wondering what DowntimePronePool.mark_dead does, how to call it, or what real-world code using it looks like, the curated examples below should help. You can also explore further usage examples of the class the method belongs to, pyelasticsearch.downtime.DowntimePronePool.
Two code examples of the DowntimePronePool.mark_dead method are shown below, sorted by popularity by default.
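Before the examples, a minimal sketch of the get / mark_dead / mark_live cycle that both excerpts below revolve around may be useful. The constructor arguments (a list of candidate server URLs plus a revival delay in seconds), the placeholder URLs, and the make_request helper are assumptions made for illustration; only get(), mark_dead() and mark_live() are taken from the examples themselves.

from pyelasticsearch.downtime import DowntimePronePool

# Assumed constructor: a list of candidate servers plus a revival delay in
# seconds after which a "dead" server is tentatively handed out again.
pool = DowntimePronePool(['http://es1:9200', 'http://es2:9200'], 60)

server_url, was_dead = pool.get()        # pick a server; was_dead means it is on probation
try:
    response = make_request(server_url)  # hypothetical HTTP call against that server
except IOError:
    pool.mark_dead(server_url)           # bench this server for the revival delay
else:
    if was_dead:
        pool.mark_live(server_url)       # the probationary server answered, so revive it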
Example 1: ElasticSearch
# Required import: from pyelasticsearch.downtime import DowntimePronePool [as alias]
# Or: from pyelasticsearch.downtime.DowntimePronePool import mark_dead [as alias]
# ......... part of the code omitted here .........
        and retrying.

        Retry the request on different servers if the first one is down and
        ``self.max_retries`` > 0.

        :arg method: An HTTP method, like "GET"
        :arg path_components: An iterable of path components, to be joined by
            "/"
        :arg body: The request body
        :arg query_params: A map of querystring param names to values or
            ``None``
        :arg encode_body: Whether to encode the body of the request as JSON
        """
        path = self._join_path(path_components)
        if query_params:
            path = "?".join(
                [path, urlencode(dict((k, self._utf8(self._to_query(v)))
                                      for k, v in iteritems(query_params)))]
            )
        request_body = self._encode_json(body) if encode_body else body
        req_method = getattr(self.session, method.lower())

        # We do our own retrying rather than using urllib3's; we want to retry
        # a different node in the cluster if possible, not the same one again
        # (which may be down).
        for attempt in xrange(self.max_retries + 1):
            server_url, was_dead = self.servers.get()
            url = server_url + path
            self.logger.debug("Making a request equivalent to this: curl -X%s '%s' -d '%s'",
                              method, url, request_body)
            try:
                resp = req_method(url, timeout=self.timeout,
                                  **({"data": request_body} if body else {}))
            except (ConnectionError, Timeout):
                self.servers.mark_dead(server_url)
                self.logger.info("%s marked as dead for %s seconds.",
                                 server_url, self.revival_delay)
                if attempt >= self.max_retries:
                    raise
            else:
                if was_dead:
                    self.servers.mark_live(server_url)
                break

        self.logger.debug("response status: %s", resp.status_code)
        prepped_response = self._decode_response(resp)
        if resp.status_code >= 400:
            self._raise_exception(resp, prepped_response)
        self.logger.debug("got response %s", prepped_response)
        return prepped_response

    def _raise_exception(self, response, decoded_body):
        """Raise an exception based on an error-indicating response from ES."""
        error_message = decoded_body.get("error", decoded_body)
        error_class = ElasticHttpError
        if response.status_code == 404:
            error_class = ElasticHttpNotFoundError
        elif (
            error_message.startswith("IndexAlreadyExistsException")
            or "nested: IndexAlreadyExistsException" in error_message
        ):
            error_class = IndexAlreadyExistsError
        raise error_class(response.status_code, error_message)

    def _encode_json(self, value):
        """
Example 2: ElasticSearch
# Required import: from pyelasticsearch.downtime import DowntimePronePool [as alias]
# Or: from pyelasticsearch.downtime.DowntimePronePool import mark_dead [as alias]
# ......... part of the code omitted here .........
"""
def join_path(path_components):
"""Smush together the path components, ignoring empty ones."""
path = '/'.join(str(p) for p in path_components if p)
if not path.startswith('/'):
path = '/' + path
return path
path = join_path(path_components)
if query_params:
path = '?'.join(
[path, urlencode(dict((k, self._to_query(v)) for k, v in
query_params.iteritems()))])
kwargs = ({'data': self._encode_json(body) if encode_body else body}
if body else {})
req_method = getattr(self.session, method.lower())
# We do our own retrying rather than using urllib3's; we want to retry
# a different node in the cluster if possible, not the same one again
# (which may be down).
for attempt in xrange(self.max_retries + 1):
server_url, was_dead = self.servers.get()
url = server_url + path
self.logger.debug(
'making %s request to path: %s %s with body: %s',
method, url, path, kwargs.get('data', {}))
try:
# prefetch=True so the connection can be quickly returned to
# the pool. This is the default in requests >=0.3.16.
resp = req_method(
url, prefetch=True, timeout=self.timeout, **kwargs)
except (ConnectionError, Timeout):
self.servers.mark_dead(server_url)
self.logger.info('%s marked as dead for %s seconds.',
server_url,
self.revival_delay)
if attempt >= self.max_retries:
raise
else:
if was_dead:
self.servers.mark_live(server_url)
break
self.logger.debug('response status: %s', resp.status_code)
prepped_response = self._decode_response(resp)
if resp.status_code >= 400:
error_class = (ElasticHttpNotFoundError if resp.status_code == 404
else ElasticHttpError)
raise error_class(
resp.status_code,
prepped_response.get('error', prepped_response))
self.logger.debug('got response %s', prepped_response)
return prepped_response
def _encode_json(self, body):
"""Return body encoded as JSON."""
return json.dumps(body, cls=self.json_encoder)
def _decode_response(self, response):
"""Return a native-Python representation of a response's JSON blob."""
json_response = response.json
if json_response is None:
raise InvalidJsonResponseError(response)
return json_response
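One note on _decode_response above: it reads response.json as an attribute, which matches the older requests releases this excerpt targets (the prefetch=True comment points to the same era); in requests 1.0 and later, json became a method. A small version-tolerant variant, offered here as a sketch rather than part of the library, could look like this:

def decode_response(response):
    """Return the decoded JSON body across old and new requests versions."""
    json_attr = response.json
    return json_attr() if callable(json_attr) else json_attr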