This article collects typical usage examples of the Python function swift._ (the underscore is the gettext translation marker used throughout OpenStack Swift). If you are unsure how _ is used, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following shows 15 code examples of the _ function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
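In Swift itself, _ is the i18n marker that modules typically obtain with "from swift import gettext_ as _". If you want to run snippets like the ones below outside of Swift, a minimal stand-in can be wired up with the standard gettext module; the domain name and fallback behaviour here are assumptions for illustration, not Swift's exact setup:

import gettext

# Bind _ to the "swift" text domain; fallback=True returns the original
# (untranslated) string when no message catalog is installed.
_ = gettext.translation('swift', fallback=True).gettext

print(_("No %s running") % 'proxy-server')  # -> No proxy-server running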
Example 1: _exception_occurred
def _exception_occurred(self, server, e, action='talking',
                        sock=None, fp=None):
    if isinstance(e, socket.timeout):
        logging.error(_("Timeout %(action)s to memcached: %(server)s"),
                      {'action': action, 'server': server})
    else:
        logging.exception(_("Error %(action)s to memcached: %(server)s"),
                          {'action': action, 'server': server})
    try:
        if fp:
            fp.close()
            del fp
    except Exception:
        pass
    try:
        if sock:
            sock.close()
            del sock
    except Exception:
        pass
    now = time.time()
    self._errors[server].append(time.time())
    if len(self._errors[server]) > ERROR_LIMIT_COUNT:
        self._errors[server] = [err for err in self._errors[server]
                                if err > now - ERROR_LIMIT_TIME]
        if len(self._errors[server]) > ERROR_LIMIT_COUNT:
            self._error_limited[server] = now + ERROR_LIMIT_DURATION
            logging.error(_('Error limiting server %s'), server)
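The tail of this example is a small sliding-window error limiter: every error timestamp is recorded, old entries are pruned once the count exceeds ERROR_LIMIT_COUNT, and if too many errors remain inside the window the server is error-limited for ERROR_LIMIT_DURATION seconds. A self-contained sketch of the same pattern (the constants, names, and server address below are illustrative, not Swift's actual configuration):

import time
from collections import defaultdict

ERROR_LIMIT_COUNT = 10      # errors tolerated inside the window (assumed value)
ERROR_LIMIT_TIME = 60       # window size in seconds (assumed value)
ERROR_LIMIT_DURATION = 60   # how long a server stays limited (assumed value)

_errors = defaultdict(list)
_error_limited = {}

def record_error(server):
    """Record an error; error-limit the server if too many happened recently."""
    now = time.time()
    _errors[server].append(now)
    if len(_errors[server]) > ERROR_LIMIT_COUNT:
        # Prune timestamps that fell out of the window before deciding.
        _errors[server] = [err for err in _errors[server]
                           if err > now - ERROR_LIMIT_TIME]
        if len(_errors[server]) > ERROR_LIMIT_COUNT:
            _error_limited[server] = now + ERROR_LIMIT_DURATION

record_error('10.0.0.1:11211')  # hypothetical memcached server address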
Example 2: status
def status(self, pids=None, **kwargs):
    """Display status of server

    :param: pids, if not supplied pids will be populated automatically
    :param: number, if supplied will only lookup the nth server
    :returns: 1 if server is not running, 0 otherwise
    """
    if pids is None:
        pids = self.get_running_pids(**kwargs)
    if not pids:
        number = kwargs.get('number', 0)
        if number:
            kwargs['quiet'] = True
        conf_files = self.conf_files(**kwargs)
        if conf_files:
            print(_("%s #%d not running (%s)") % (self.server, number,
                                                  conf_files[0]))
        else:
            print(_("No %s running") % self.server)
        return 1
    for pid, pid_file in pids.items():
        conf_file = self.get_conf_file_name(pid_file)
        print(_("%s running (%s - %s)") % (self.server, pid, conf_file))
    return 0
Example 3: _exception_occurred
def _exception_occurred(self, server, e, action='talking',
                        sock=None, fp=None, got_connection=True):
    if isinstance(e, Timeout):
        logging.error(_("Timeout %(action)s to memcached: %(server)s"),
                      {'action': action, 'server': server})
    else:
        logging.exception(_("Error %(action)s to memcached: %(server)s"),
                          {'action': action, 'server': server})
    try:
        if fp:
            fp.close()
            del fp
    except Exception:
        pass
    try:
        if sock:
            sock.close()
            del sock
    except Exception:
        pass
    if got_connection:
        # We need to return something to the pool
        # A new connection will be created the next time it is retrieved
        self._return_conn(server, None, None)
    now = time.time()
    self._errors[server].append(time.time())
    if len(self._errors[server]) > ERROR_LIMIT_COUNT:
        self._errors[server] = [err for err in self._errors[server]
                                if err > now - ERROR_LIMIT_TIME]
        if len(self._errors[server]) > ERROR_LIMIT_COUNT:
            self._error_limited[server] = now + ERROR_LIMIT_DURATION
            logging.error(_('Error limiting server %s'), server)
Example 4: run_once
def run_once(self, *args, **kwargs):
    self._zero_stats()
    self.logger.info(_("Running object replicator in script mode."))
    override_devices = list_from_csv(kwargs.get('devices'))
    override_partitions = list_from_csv(kwargs.get('partitions'))
    override_policies = list_from_csv(kwargs.get('policies'))
    if not override_devices:
        override_devices = None
    if not override_partitions:
        override_partitions = None
    if not override_policies:
        override_policies = None
    self.replicate(
        override_devices=override_devices,
        override_partitions=override_partitions,
        override_policies=override_policies)
    total = (time.time() - self.stats['start']) / 60
    self.logger.info(
        _("Object replication complete (once). (%.02f minutes)"), total)
    if not (override_partitions or override_devices):
        replication_last = time.time()
        dump_recon_cache({'replication_stats': self.stats,
                          'replication_time': total,
                          'replication_last': replication_last,
                          'object_replication_time': total,
                          'object_replication_last': replication_last},
                         self.rcache, self.logger)
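When no device or partition overrides are given, run_once() finishes by persisting its statistics through dump_recon_cache(). The recon cache is a JSON file, so the results can be inspected afterwards roughly as sketched below; the path is the conventional default and may differ in your deployment:

import json

RECON_CACHE = '/var/cache/swift/object.recon'  # assumed default location

with open(RECON_CACHE) as f:
    recon = json.load(f)

print(recon.get('object_replication_time'))   # duration in minutes, as above
print(recon.get('object_replication_last'))   # timestamp of the last pass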
Example 5: _get_response
def _get_response(self, node, part, path, headers, policy):
    """
    Helper method for reconstruction that GETs a single EC fragment
    archive

    :param node: the node to GET from
    :param part: the partition
    :param path: full path of the desired EC archive
    :param headers: the headers to send
    :param policy: an instance of
                   :class:`~swift.common.storage_policy.BaseStoragePolicy`
    :returns: response
    """
    resp = None
    try:
        with ConnectionTimeout(self.conn_timeout):
            conn = http_connect(node['ip'], node['port'], node['device'],
                                part, 'GET', path, headers=headers)
        with Timeout(self.node_timeout):
            resp = conn.getresponse()
        if resp.status not in [HTTP_OK, HTTP_NOT_FOUND]:
            self.logger.warning(
                _("Invalid response %(resp)s from %(full_path)s"),
                {'resp': resp.status,
                 'full_path': self._full_path(node, part, path, policy)})
            resp = None
        elif resp.status == HTTP_NOT_FOUND:
            resp = None
    except (Exception, Timeout):
        self.logger.exception(
            _("Trying to GET %(full_path)s"), {
                'full_path': self._full_path(node, part, path, policy)})
    return resp
Example 6: run_forever
def run_forever(self, *args, **kwargs):
    """
    Run the updater continuously.
    """
    time.sleep(random() * self.interval)
    while True:
        self.logger.info(_('Begin container update sweep'))
        begin = time.time()
        now = time.time()
        expired_suppressions = \
            [a for a, u in self.account_suppressions.items()
             if u < now]
        for account in expired_suppressions:
            del self.account_suppressions[account]
        pid2filename = {}
        # read from account ring to ensure it's fresh
        self.get_account_ring().get_nodes('')
        for path in self.get_paths():
            while len(pid2filename) >= self.concurrency:
                pid = os.wait()[0]
                try:
                    self._load_suppressions(pid2filename[pid])
                finally:
                    del pid2filename[pid]
            fd, tmpfilename = mkstemp()
            os.close(fd)
            pid = os.fork()
            if pid:
                pid2filename[pid] = tmpfilename
            else:
                signal.signal(signal.SIGTERM, signal.SIG_DFL)
                eventlet_monkey_patch()
                self.no_changes = 0
                self.successes = 0
                self.failures = 0
                self.new_account_suppressions = open(tmpfilename, 'w')
                forkbegin = time.time()
                self.container_sweep(path)
                elapsed = time.time() - forkbegin
                self.logger.debug(
                    _('Container update sweep of %(path)s completed: '
                      '%(elapsed).02fs, %(success)s successes, %(fail)s '
                      'failures, %(no_change)s with no changes'),
                    {'path': path, 'elapsed': elapsed,
                     'success': self.successes, 'fail': self.failures,
                     'no_change': self.no_changes})
                sys.exit()
        while pid2filename:
            pid = os.wait()[0]
            try:
                self._load_suppressions(pid2filename[pid])
            finally:
                del pid2filename[pid]
        elapsed = time.time() - begin
        self.logger.info(_('Container update sweep completed: %.02fs'),
                         elapsed)
        dump_recon_cache({'container_updater_sweep': elapsed},
                         self.rcache, self.logger)
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
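The sweep above throttles itself by forking one child per path and calling os.wait() whenever the number of live children reaches self.concurrency. A reduced, Unix-only sketch of that throttling pattern (the work function and paths are placeholders, not Swift code):

import os

def work(path):
    pass  # stand-in for container_sweep(path)

def sweep_all(paths, concurrency=4):
    children = set()
    for path in paths:
        while len(children) >= concurrency:
            # Block until any child exits, then free its slot.
            children.discard(os.wait()[0])
        pid = os.fork()
        if pid:
            children.add(pid)   # parent: remember the child
        else:
            work(path)          # child: do the work, then exit immediately
            os._exit(0)
    while children:             # reap whatever is still running
        children.discard(os.wait()[0])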
Example 7: download
def download(self, log_files, sort='time', limit=-1, nfl_filter='',
             output_format='default'):
    if len(log_files) == 0:
        raise NotFoundException(_('no log file found'))
    try:
        # use raw strings so the backslash escapes reach the regex intact
        nfl_esc = nfl_filter.replace('(', r'\(').replace(')', r'\)')
        # remove the slash that is intentionally added in the URL
        # to avoid failure of filtering stats data.
        if nfl_esc.startswith('/'):
            nfl_esc = nfl_esc[1:]
        stats = Stats2(*log_files)
        stats.sort_stats(sort)
        if output_format == 'python':
            data = self.format_source_code(nfl_filter)
        elif output_format == 'json':
            data = stats.to_json(nfl_esc, limit)
        elif output_format == 'csv':
            data = stats.to_csv(nfl_esc, limit)
        elif output_format == 'ods':
            data = stats.to_ods(nfl_esc, limit)
        else:
            data = stats.print_stats()
        return data, [('content-type', self.format_dict[output_format])]
    except ODFLIBNotInstalled:
        raise
    except Exception as ex:
        raise ProfileException(_('Data download error: %s') % ex)
Example 8: format_source_code
def format_source_code(self, nfl):
    nfls = re.split('[:()]', nfl)
    file_path = nfls[0]
    try:
        lineno = int(nfls[1])
    except (TypeError, ValueError, IndexError):
        lineno = 0
    # for security reason, this need to be fixed.
    if not file_path.endswith('.py'):
        return _('The file type are forbidden to access!')
    try:
        data = []
        i = 0
        with open(file_path) as f:
            lines = f.readlines()
            max_width = str(len(str(len(lines))))
            fmt = '<span id="L%d" rel="#L%d">%' + max_width\
                + 'd|<code>%s</code></span>'
            for line in lines:
                l = cgi.escape(line, quote=None)
                i = i + 1
                if i == lineno:
                    fmt2 = '<span id="L%d" style="background-color: \
rgb(127,255,127)">%' + max_width +\
                        'd|<code>%s</code></span>'
                    data.append(fmt2 % (i, i, l))
                else:
                    data.append(fmt % (i, i, i, l))
        data = ''.join(data)
    except Exception:
        return _('Can not access the file %s.') % file_path
    return '<pre>%s</pre>' % data
Example 9: conf_files
def conf_files(self, **kwargs):
    """Get conf files for this server

    :param: number, if supplied will only lookup the nth server
    :returns: list of conf files
    """
    if self.server in STANDALONE_SERVERS:
        found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
                                       '.conf', dir_ext='.conf.d')
    else:
        found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type,
                                       '.conf', dir_ext='.conf.d')
    number = kwargs.get('number')
    if number:
        try:
            conf_files = [found_conf_files[number - 1]]
        except IndexError:
            conf_files = []
    else:
        conf_files = found_conf_files
    if not conf_files:
        # maybe there's a config file(s) out there, but I couldn't find it!
        if not kwargs.get('quiet'):
            print(_('Unable to locate config %sfor %s') % (
                ('number %s ' % number if number else ''), self.server))
        if kwargs.get('verbose') and not kwargs.get('quiet'):
            if found_conf_files:
                print(_('Found configs:'))
            for i, conf_file in enumerate(found_conf_files):
                print(' %d) %s' % (i + 1, conf_file))
    return conf_files
Example 10: object_update
def object_update(self, node, part, op, obj, headers_out):
    """
    Perform the object update to the container

    :param node: node dictionary from the container ring
    :param part: partition that holds the container
    :param op: operation performed (ex: 'PUT' or 'DELETE')
    :param obj: object name being updated
    :param headers_out: headers to send with the update
    """
    try:
        with ConnectionTimeout(self.conn_timeout):
            conn = http_connect(node['ip'], node['port'], node['device'],
                                part, op, obj, headers_out)
        with Timeout(self.node_timeout):
            resp = conn.getresponse()
            resp.read()
            success = is_success(resp.status)
            if not success:
                self.logger.debug(
                    _('Error code %(status)d is returned from remote '
                      'server %(ip)s: %(port)s / %(device)s'),
                    {'status': resp.status, 'ip': node['ip'],
                     'port': node['port'], 'device': node['device']})
            return (success, node['id'])
    except (Exception, Timeout):
        self.logger.exception(_('ERROR with remote server '
                                '%(ip)s:%(port)s/%(device)s'), node)
    return HTTP_INTERNAL_SERVER_ERROR, node['id']
Example 11: run_once
def run_once(self, *args, **kwargs):
    """Run a replication pass once."""
    self._zero_stats()
    dirs = []
    ips = whataremyips()
    if not ips:
        self.logger.error(_('ERROR Failed to get my own IPs?'))
        return
    self._local_device_ids = set()
    for node in self.ring.devs:
        if node and is_local_device(ips, self.port,
                                    node['replication_ip'],
                                    node['replication_port']):
            if self.mount_check and not ismount(
                    os.path.join(self.root, node['device'])):
                self.logger.warn(
                    _('Skipping %(device)s as it is not mounted') % node)
                continue
            unlink_older_than(
                os.path.join(self.root, node['device'], 'tmp'),
                time.time() - self.reclaim_age)
            datadir = os.path.join(self.root, node['device'], self.datadir)
            if os.path.isdir(datadir):
                self._local_device_ids.add(node['id'])
                dirs.append((datadir, node['id']))
    self.logger.info(_('Beginning replication run'))
    for part, object_file, node_id in roundrobin_datadirs(dirs):
        self.cpool.spawn_n(
            self._replicate_object, part, object_file, node_id)
    self.cpool.waitall()
    self.logger.info(_('Replication run OVER'))
    self._report_stats()
Example 12: _connect_put_node
def _connect_put_node(self, nodes, part, path, headers,
                      logger_thread_locals):
    """Method for a file PUT connect"""
    self.app.logger.thread_locals = logger_thread_locals
    for node in nodes:
        try:
            start_time = time.time()
            with ConnectionTimeout(self.app.conn_timeout):
                conn = http_connect(
                    node['ip'], node['port'], node['device'], part, 'PUT',
                    path, headers)
            self.app.set_node_timing(node, time.time() - start_time)
            with Timeout(self.app.node_timeout):
                resp = conn.getexpect()
            if resp.status == HTTP_CONTINUE:
                conn.resp = None
                conn.node = node
                return conn
            elif is_success(resp.status):
                conn.resp = resp
                conn.node = node
                return conn
            elif headers['If-None-Match'] is not None and \
                    resp.status == HTTP_PRECONDITION_FAILED:
                conn.resp = resp
                conn.node = node
                return conn
            elif resp.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(node, _('ERROR Insufficient Storage'))
        except (Exception, Timeout):
            self.app.exception_occurred(
                node, _('Object'),
                _('Expect: 100-continue on %s') % path)
Example 13: _make_app_iter
def _make_app_iter(self, node, source):
    """
    Returns an iterator over the contents of the source (via its read
    func). There is also quite a bit of cleanup to ensure garbage
    collection works and the underlying socket of the source is closed.

    :param source: The httplib.Response object this iterator should read
                   from.
    :param node: The node the source is reading from, for logging purposes.
    """
    try:
        # Spawn reader to read from the source and place in the queue.
        # We then drop any reference to the source or node, for garbage
        # collection purposes.
        queue = Queue(1)
        spawn_n(self._make_app_iter_reader, node, source, queue,
                self.app.logger.thread_locals)
        source = node = None
        while True:
            chunk = queue.get(timeout=self.app.node_timeout)
            if isinstance(chunk, bool):  # terminator
                success = chunk
                if not success:
                    raise Exception(_('Failed to read all data'
                                      ' from the source'))
                break
            yield chunk
    except Empty:
        raise ChunkReadTimeout()
    except (GeneratorExit, Timeout):
        self.app.logger.warn(_('Client disconnected on read'))
    except Exception:
        self.app.logger.exception(_('Trying to send to client'))
        raise
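The iterator above hands data between an eventlet reader and the response generator through a Queue, using a bare boolean as the end-of-stream marker (True for success, False for a failed read). The same hand-off can be illustrated with only the standard library; the names and timeout handling below are simplifications for illustration, not Swift's code:

import threading
from queue import Queue, Empty

def reader(source_chunks, q):
    """Producer: push chunks, then a boolean terminator (True on success)."""
    try:
        for chunk in source_chunks:
            q.put(chunk)
        q.put(True)
    except Exception:
        q.put(False)

def app_iter(source_chunks, timeout=5):
    q = Queue(1)
    threading.Thread(target=reader, args=(source_chunks, q),
                     daemon=True).start()
    while True:
        try:
            chunk = q.get(timeout=timeout)
        except Empty:
            raise TimeoutError('chunk read timed out')
        if isinstance(chunk, bool):  # terminator
            if not chunk:
                raise Exception('Failed to read all data from the source')
            break
        yield chunk

print(b''.join(app_iter([b'hello ', b'world'])))  # -> b'hello world'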
Example 14: stop
def stop(self, **kwargs):
    """stops a server
    """
    server_pids = {}
    for server in self.servers:
        signaled_pids = server.stop(**kwargs)
        if not signaled_pids:
            print(_('No %s running') % server)
        else:
            server_pids[server] = signaled_pids
    # all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
    signaled_pids = [p for server, pids in server_pids.items()
                     for p in pids]
    # keep track of the pids yielded back as killed for all servers
    killed_pids = set()
    kill_wait = kwargs.get('kill_wait', KILL_WAIT)
    for server, killed_pid in watch_server_pids(server_pids,
                                                interval=kill_wait,
                                                **kwargs):
        print(_("%s (%s) appears to have stopped") % (server, killed_pid))
        killed_pids.add(killed_pid)
        if not killed_pids.symmetric_difference(signaled_pids):
            # all processes have been stopped
            return 0
    # reached interval n watch_pids w/o killing all servers
    for server, pids in server_pids.items():
        if not killed_pids.issuperset(pids):
            # some pids of this server were not killed
            print(_('Waited %s seconds for %s to die; giving up') % (
                kill_wait, server))
    return 1
Example 15: get_keys
def get_keys(self, env, required=None):
    # Get the key(s) from the keymaster
    required = required if required is not None else [self.server_type]
    try:
        fetch_crypto_keys = env[CRYPTO_KEY_CALLBACK]
    except KeyError:
        self.logger.exception(_('ERROR get_keys() missing callback'))
        raise HTTPInternalServerError(
            "Unable to retrieve encryption keys.")
    try:
        keys = fetch_crypto_keys()
    except Exception as err:  # noqa
        self.logger.exception(_(
            'ERROR get_keys(): from callback: %s') % err)
        raise HTTPInternalServerError(
            "Unable to retrieve encryption keys.")
    for name in required:
        try:
            key = keys[name]
            self.crypto.check_key(key)
            continue
        except KeyError:
            self.logger.exception(_("Missing key for %r") % name)
        except TypeError:
            self.logger.exception(_("Did not get a keys dict"))
        except ValueError as e:
            # don't include the key in any messages!
            self.logger.exception(_("Bad key for %(name)r: %(err)s") %
                                  {'name': name, 'err': e})
        raise HTTPInternalServerError(
            "Unable to retrieve encryption keys.")
    return keys
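get_keys() only consumes the callback: some keymaster middleware earlier in the pipeline is expected to have stored a zero-argument callable under CRYPTO_KEY_CALLBACK in the WSGI environ, returning a dict of named keys. A hedged sketch of that contract; the environ key string, key names, and key sizes here are illustrative assumptions rather than Swift's exact values:

import os

CRYPTO_KEY_CALLBACK = 'swift.callback.fetch_crypto_keys'  # assumed env key

def make_fetch_crypto_keys():
    # Random keys for illustration only; a real keymaster derives them.
    keys = {'object': os.urandom(32), 'container': os.urandom(32)}
    def fetch_crypto_keys():
        return keys
    return fetch_crypto_keys

env = {}  # stands in for the WSGI environ
env[CRYPTO_KEY_CALLBACK] = make_fetch_crypto_keys()
print(sorted(env[CRYPTO_KEY_CALLBACK]().keys()))  # -> ['container', 'object']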