This article collects typical usage examples of the Python function swift.common.utils.quorum_size. If you have been wondering what exactly quorum_size does and how to use it, the curated examples below should help.
14 code examples of the quorum_size function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
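For orientation: quorum_size itself is not shown on this page, but its behavior is pinned down by the docstring in Example 4 ("floor(number of replica / 2) + 1", i.e. a simple majority). A minimal sketch under that assumption:

def quorum_size(n):
    """Smallest number of matching responses that constitutes a
    majority among n replicas: floor(n / 2) + 1 (a sketch, not
    necessarily Swift's exact source)."""
    return (n // 2) + 1

# A majority of 3 replicas is 2, of 4 is 3, of 5 is 3.
assert quorum_size(3) == 2
assert quorum_size(4) == 3
assert quorum_size(5) == 3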
Example 1: direct_get_container_policy_index
def direct_get_container_policy_index(container_ring, account_name,
container_name):
"""
Talk directly to the primary container servers to figure out the storage
policy index for a given container.
:param container_ring: ring in which to look up the container locations
:param account_name: name of the container's account
:param container_name: name of the container
:returns: storage policy index, or None if it couldn't get a quorum
"""
def _eat_client_exception(*args):
try:
return direct_head_container(*args)
except ClientException as err:
if err.http_status == 404:
return err.http_headers
except (Timeout, socket.error):
pass
pile = GreenPile()
part, nodes = container_ring.get_nodes(account_name, container_name)
for node in nodes:
pile.spawn(_eat_client_exception, node, part, account_name,
container_name)
headers = [x for x in pile if x is not None]
if len(headers) < quorum_size(len(nodes)):
return
return best_policy_index(headers)
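The gate at the end of this example means that, with the usual three container replicas, at least two primaries must answer before the policy index is trusted; otherwise the function returns None. A hypothetical walk-through with stand-in values:

from swift.common.utils import quorum_size

replica_count = 3                  # typical container ring
headers = ['resp1', 'resp2']       # stand-ins for two successful HEADs
print(len(headers) >= quorum_size(replica_count))   # True: 2 >= 2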
Example 2: best_response
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None, headers=None):
"""
Given a list of responses from several servers, choose the best to
return to the API.
:param req: swob.Request object
:param statuses: list of statuses returned
:param reasons: list of reasons for each status
:param bodies: bodies of each response
:param server_type: type of server the responses came from
:param etag: etag
:param headers: headers of each response
:returns: swob.Response object with the correct status, body, etc. set
"""
resp = Response(request=req)
if len(statuses):
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
hstatuses = \
[s for s in statuses if hundred <= s < hundred + 100]
if len(hstatuses) >= quorum_size(len(statuses)):
status = max(hstatuses)
status_index = statuses.index(status)
resp.status = '%s %s' % (status, reasons[status_index])
resp.body = bodies[status_index]
if headers:
update_headers(resp, headers[status_index])
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
resp.status = '503 Internal Server Error'
return resp
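To make the per-hundred bucketing in best_response concrete, here is a self-contained walk-through with made-up status values (201, 201, 503): the 2xx bucket holds a quorum, so 201 wins.

from swift.common.utils import quorum_size

statuses = [201, 201, 503]
for hundred in (200, 300, 400):                      # 2xx, 3xx, 4xx buckets
    hstatuses = [s for s in statuses if hundred <= s < hundred + 100]
    if len(hstatuses) >= quorum_size(len(statuses)):
        print('quorum status: %d' % max(hstatuses))  # quorum status: 201
        break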
Example 3: _transfer_data
def _transfer_data(self, req, data_source, conns, nodes):
min_conns = quorum_size(len(nodes))
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if req.is_chunked:
for conn in conns:
conn.queue.put('0\r\n\r\n')
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
if req.is_chunked else chunk)
else:
conn.close()
conns.remove(conn)
self._check_min_conn(
req, conns, min_conns,
msg='Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections')
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
self._check_min_conn(
req, conns, min_conns,
msg='Object PUT exceptions after last send, '
'%(conns)s/%(nodes)s required connections')
except ChunkReadTimeout as err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except HTTPException:
raise
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
raise HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
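As an aside, the '%x\r\n%s\r\n' framing used in the send loop above is plain HTTP/1.1 chunked transfer encoding: the chunk length in hex, CRLF, the payload, CRLF, with '0\r\n\r\n' as the terminator. For example:

chunk = 'hello world'                        # 11 bytes
frame = '%x\r\n%s\r\n' % (len(chunk), chunk)
print(repr(frame))                           # 'b\r\nhello world\r\n' (0xb == 11)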
Example 4: quorum
def quorum(self):
"""
Quorum concept in the replication case:
floor(number of replica / 2) + 1
"""
if not self.object_ring:
raise PolicyError('Ring is not loaded')
return quorum_size(self.object_ring.replica_count)
Example 5: process_container
def process_container(self, dbfile):
"""
Process a container, and update the information in the account.
:param dbfile: container DB to process
"""
start_time = time.time()
broker = ContainerBroker(dbfile, logger=self.logger)
info = broker.get_info()
# Don't send updates if the container was auto-created since it
# definitely doesn't have up to date statistics.
if Timestamp(info['put_timestamp']) <= 0:
return
if self.account_suppressions.get(info['account'], 0) > time.time():
return
if info['put_timestamp'] > info['reported_put_timestamp'] or \
info['delete_timestamp'] > info['reported_delete_timestamp'] \
or info['object_count'] != info['reported_object_count'] or \
info['bytes_used'] != info['reported_bytes_used']:
container = '/%s/%s' % (info['account'], info['container'])
part, nodes = self.get_account_ring().get_nodes(info['account'])
events = [spawn(self.container_report, node, part, container,
info['put_timestamp'], info['delete_timestamp'],
info['object_count'], info['bytes_used'],
info['storage_policy_index'])
for node in nodes]
successes = 0
for event in events:
if is_success(event.wait()):
successes += 1
if successes >= quorum_size(len(events)):
self.logger.increment('successes')
self.successes += 1
self.logger.debug(
_('Update report sent for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
broker.reported(info['put_timestamp'],
info['delete_timestamp'], info['object_count'],
info['bytes_used'])
else:
self.logger.increment('failures')
self.failures += 1
self.logger.debug(
_('Update report failed for %(container)s %(dbfile)s'),
{'container': container, 'dbfile': dbfile})
self.account_suppressions[info['account']] = until = \
time.time() + self.account_suppression_time
if self.new_account_suppressions:
print >>self.new_account_suppressions, \
info['account'], until
# Only track timing data for attempted updates:
self.logger.timing_since('timing', start_time)
else:
self.logger.increment('no_changes')
self.no_changes += 1
Example 6: _post_replicate_hook
def _post_replicate_hook(self, broker, info, responses):
if info['account'] == MISPLACED_OBJECTS_ACCOUNT:
return
point = broker.get_reconciler_sync()
if not broker.has_multiple_policies() and info['max_row'] != point:
broker.update_reconciler_sync(info['max_row'])
return
max_sync = self.dump_to_reconciler(broker, point)
success = responses.count(True) >= quorum_size(len(responses))
if max_sync > point and success:
# to be safe, only slide up the sync point with a quorum on
# replication
broker.update_reconciler_sync(max_sync)
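The quorum check here protects the reconciler sync point: it only advances when a majority of the replication responses succeeded. A hypothetical tally:

from swift.common.utils import quorum_size

responses = [True, True, False]    # per-node replication outcomes
success = responses.count(True) >= quorum_size(len(responses))
print(success)                     # True: 2 of 3 is a quorum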
Example 7: have_quorum
def have_quorum(self, statuses, node_count):
"""
Given a list of statuses from several requests, determine if
a quorum response can already be decided.
:param statuses: list of statuses returned
:param node_count: number of nodes being queried (basically ring count)
:returns: True or False, depending on if quorum is established
"""
quorum = quorum_size(node_count)
if len(statuses) >= quorum:
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
if sum(1 for s in statuses if hundred <= s < hundred + 100) >= quorum:
return True
return False
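Because only quorum_size(node_count) matching status classes are needed, have_quorum can return True before every node has answered. A quick check of that early-exit behavior, with hypothetical values:

from swift.common.utils import quorum_size

node_count = 3
statuses = [200, 200]              # third node still pending
quorum = quorum_size(node_count)   # 2
decided = len(statuses) >= quorum and any(
    sum(1 for s in statuses if h <= s < h + 100) >= quorum
    for h in (200, 300, 400))
print(decided)                     # True: no need to wait for the last node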
Example 8: _check_failure_put_connections
def _check_failure_put_connections(self, conns, req, nodes):
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
raise HTTPPreconditionFailed(request=req)
if any(conn for conn in conns if conn.resp and
conn.resp.status == HTTP_CONFLICT):
timestamps = [HeaderKeyDict(conn.resp.getheaders()).get(
'X-Backend-Timestamp') for conn in conns if conn.resp]
self.app.logger.debug(
_('Object PUT returning 202 for 409: '
'%(req_timestamp)s <= %(timestamps)r'),
{'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(timestamps)})
raise HTTPAccepted(request=req)
min_conns = quorum_size(len(nodes))
self._check_min_conn(req, conns, min_conns)
Example 9: add_to_reconciler_queue
def add_to_reconciler_queue(container_ring, account, container, obj,
obj_policy_index, obj_timestamp, op,
force=False, conn_timeout=5, response_timeout=15):
"""
Add an object to the container reconciler's queue. This will cause the
container reconciler to move it from its current storage policy index to
the correct storage policy index.
:param container_ring: container ring
:param account: the misplaced object's account
:param container: the misplaced object's container
:param obj: the misplaced object
:param obj_policy_index: the policy index where the misplaced object
currently is
:param obj_timestamp: the misplaced object's X-Timestamp. We need this to
ensure that the reconciler doesn't overwrite a newer
object with an older one.
:param op: the method of the operation (DELETE or PUT)
:param force: over-write queue entries newer than obj_timestamp
:param conn_timeout: max time to wait for connection to container server
:param response_timeout: max time to wait for response from container
server
:returns: .misplaced_object container name, False on failure. "Success"
means a quorum of containers got the update.
"""
container_name = get_reconciler_container_name(obj_timestamp)
object_name = get_reconciler_obj_name(obj_policy_index, account,
container, obj)
if force:
# this allows an operator to re-enqueue an object that has
# already been popped from the queue to be reprocessed, but
# could potentially prevent out of order updates from making it
# into the queue
x_timestamp = Timestamp(time.time()).internal
else:
x_timestamp = obj_timestamp
q_op_type = get_reconciler_content_type(op)
headers = {
'X-Size': 0,
'X-Etag': obj_timestamp,
'X-Timestamp': x_timestamp,
'X-Content-Type': q_op_type,
}
def _check_success(*args, **kwargs):
try:
direct_put_container_object(*args, **kwargs)
return 1
except (ClientException, Timeout, socket.error):
return 0
pile = GreenPile()
part, nodes = container_ring.get_nodes(MISPLACED_OBJECTS_ACCOUNT,
container_name)
for node in nodes:
pile.spawn(_check_success, node, part, MISPLACED_OBJECTS_ACCOUNT,
container_name, object_name, headers=headers,
conn_timeout=conn_timeout,
response_timeout=response_timeout)
successes = sum(pile)
if successes >= quorum_size(len(nodes)):
return container_name
else:
return False
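Since _check_success returns 1 on success and 0 on any failure, sum(pile) is simply the number of container servers that accepted the queue entry, and the enqueue is only reported as a success on a quorum. Illustrated with plain integers:

from swift.common.utils import quorum_size

results = [1, 1, 0]                # two PUTs landed, one timed out
print(sum(results) >= quorum_size(len(results)))   # True: 2 >= 2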
Example 10: PUT
#.........part of the code omitted here.........
copy_headers_into(source_resp, sink_req)
copy_headers_into(req, sink_req)
# copy over x-static-large-object for POSTs and manifest copies
if 'X-Static-Large-Object' in source_resp.headers and \
req.params.get('multipart-manifest') == 'get':
sink_req.headers['X-Static-Large-Object'] = \
source_resp.headers['X-Static-Large-Object']
req = sink_req
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(obj_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get('transfer-encoding', '')
chunked = ('chunked' in te)
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or chunked:
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req.swift_entity_path, nheaders,
self.app.logger.thread_locals)
conns = [conn for conn in pile if conn]
min_conns = quorum_size(len(nodes))
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
return HTTPPreconditionFailed(request=req)
if len(conns) < min_conns:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': min_conns})
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
for conn in conns:
conn.queue.put('0\r\n\r\n')
break
Example 11: PUT
#.........part of the code omitted here.........
int(x_delete_at)
/ self.app.expiring_objects_container_divisor
* self.app.expiring_objects_container_divisor
)
delete_at_part, delete_at_nodes = self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container
)
else:
delete_at_container = delete_at_part = delete_at_nodes = None
node_iter = GreenthreadSafeIterator(self.iter_nodes_local_first(obj_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get("transfer-encoding", "")
chunked = "chunked" in te
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers, delete_at_container, delete_at_part, delete_at_nodes
)
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or chunked:
nheaders["Expect"] = "100-continue"
pile.spawn(
self._connect_put_node,
node_iter,
partition,
req.swift_entity_path,
nheaders,
self.app.logger.thread_locals,
)
conns = [conn for conn in pile if conn]
min_conns = quorum_size(len(nodes))
if req.if_none_match is not None and "*" in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.app.logger.debug(_("Object PUT returning 412, %(statuses)r"), {"statuses": statuses})
return HTTPPreconditionFailed(request=req)
if len(conns) < min_conns:
self.app.logger.error(
_("Object PUT returning 503, %(conns)s/%(nodes)s " "required connections"),
{"conns": len(conns), "nodes": min_conns},
)
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
for conn in conns:
conn.queue.put("0\r\n\r\n")
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
Example 12: PUT
#.........part of the code omitted here.........
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_container = delete_at_part = delete_at_nodes = None
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(self.app.object_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get('transfer-encoding', '')
chunked = ('chunked' in te)
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or chunked:
nheaders['Expect'] = '100-continue'
################################# CHANGED_CODE ###################################################################
# Replaced node_iter with nodes in the following line so that a new list in a different order isn't used.
# Change from node_iter to nodes to make sure it writes to the same device.
# Without this, it gets a new list of nodes from the ring in a different order and connects to the first one.
pile.spawn(self._connect_put_node, nodes, partition,
req.swift_entity_path, nheaders,
self.app.logger.thread_locals)
################################# CHANGED_CODE ###################################################################
conns = [conn for conn in pile if conn]
min_conns = quorum_size(len(nodes))
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
return HTTPPreconditionFailed(request=req)
if len(conns) < min_conns:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': min_conns})
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
#### CHANGED CODE ####
key = hash_path(self.account_name,self.container_name,self.object_name)
os.system("mkdir -p /SSD/"+str(partition)+"/"+str(key[-3:])+"/"+str(key))
f= open("/SSD/"+str(partition)+"/"+str(key[-3:])+"/"+str(key)+"/"+str(self.object_name),"w")
####
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
Example 13: PUT
#.........part of the code omitted here.........
return HTTPBadRequest(request=req, content_type='text/plain',
body='Non-integer X-Delete-At')
req.environ.setdefault('swift.log_info', []).append(
'x-delete-at:%s' % x_delete_at)
delete_at_container = normalize_delete_at_timestamp(
int(x_delete_at) /
self.app.expiring_objects_container_divisor *
self.app.expiring_objects_container_divisor)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
else:
delete_at_container = delete_at_part = delete_at_nodes = None
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(self.app.object_ring, partition))
pile = GreenPile(len(nodes))
te = req.headers.get('transfer-encoding', '')
chunked = ('chunked' in te)
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
for nheaders in outgoing_headers:
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or chunked:
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req.swift_entity_path, nheaders,
self.app.logger.thread_locals)
conns = [conn for conn in pile if conn]
min_conns = quorum_size(len(nodes))
if len(conns) < min_conns:
self.app.logger.error(
_('Object PUT returning 503, %(conns)s/%(nodes)s '
'required connections'),
{'conns': len(conns), 'nodes': min_conns})
return HTTPServiceUnavailable(request=req)
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if chunked:
for conn in conns:
conn.queue.put('0\r\n\r\n')
break
bytes_transferred += len(chunk)
if bytes_transferred > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
if chunked else chunk)
else:
conns.remove(conn)
Example 14: process_container
def process_container(self, dbfile):
"""
Process a container, and update the information in the account.
:param dbfile: container DB to process
"""
start_time = time.time()
broker = ContainerBroker(dbfile, logger=self.logger)
info = broker.get_info()
# Don't send updates if the container was auto-created since it
# definitely doesn't have up to date statistics.
if Timestamp(info["put_timestamp"]) <= 0:
return
if self.account_suppressions.get(info["account"], 0) > time.time():
return
if (
info["put_timestamp"] > info["reported_put_timestamp"]
or info["delete_timestamp"] > info["reported_delete_timestamp"]
or info["object_count"] != info["reported_object_count"]
or info["bytes_used"] != info["reported_bytes_used"]
):
container = "/%s/%s" % (info["account"], info["container"])
part, nodes = self.get_account_ring().get_nodes(info["account"])
events = [
spawn(
self.container_report,
node,
part,
container,
info["put_timestamp"],
info["delete_timestamp"],
info["object_count"],
info["bytes_used"],
info["storage_policy_index"],
)
for node in nodes
]
successes = 0
for event in events:
if is_success(event.wait()):
successes += 1
if successes >= quorum_size(len(events)):
self.logger.increment("successes")
self.successes += 1
self.logger.debug(
_("Update report sent for %(container)s %(dbfile)s"), {"container": container, "dbfile": dbfile}
)
broker.reported(
info["put_timestamp"], info["delete_timestamp"], info["object_count"], info["bytes_used"]
)
else:
self.logger.increment("failures")
self.failures += 1
self.logger.debug(
_("Update report failed for %(container)s %(dbfile)s"), {"container": container, "dbfile": dbfile}
)
self.account_suppressions[info["account"]] = until = time.time() + self.account_suppression_time
if self.new_account_suppressions:
print >>self.new_account_suppressions, info["account"], until
# Only track timing data for attempted updates:
self.logger.timing_since("timing", start_time)
else:
self.logger.increment("no_changes")
self.no_changes += 1