本文整理汇总了Python中swift.common.utils.list_from_csv函数的典型用法代码示例。如果您正苦于以下问题:Python list_from_csv函数的具体用法?Python list_from_csv怎么用?Python list_from_csv使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了list_from_csv函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run_once
def run_once(self, *args, **kwargs):
    """Run a single replication pass, honoring optional CSV overrides.

    ``devices``, ``partitions`` and ``policies`` kwargs each restrict the
    pass; recon stats are dumped only for unrestricted runs.
    """
    self._zero_stats()
    self.logger.info(_("Running object replicator in script mode."))
    # An absent or empty CSV value means "no restriction" -> None.
    overrides = {
        key: list_from_csv(kwargs.get(key)) or None
        for key in ("devices", "partitions", "policies")
    }
    self.replicate(
        override_devices=overrides["devices"],
        override_partitions=overrides["partitions"],
        override_policies=overrides["policies"],
    )
    elapsed_minutes = (time.time() - self.stats["start"]) / 60
    self.logger.info(
        _("Object replication complete (once). (%.02f minutes)"),
        elapsed_minutes)
    # Only a full (device- and partition-unrestricted) pass updates recon.
    if overrides["devices"] is None and overrides["partitions"] is None:
        now = time.time()
        dump_recon_cache(
            {
                "replication_stats": self.stats,
                "replication_time": elapsed_minutes,
                "replication_last": now,
                "object_replication_time": elapsed_minutes,
                "object_replication_last": now,
            },
            self.rcache,
            self.logger,
        )
示例2: _integral_keystone_identity
def _integral_keystone_identity(self, environ):
    """Extract the identity from the Keystone auth component.

    Returns None unless the auth middleware marked the request Confirmed;
    otherwise a dict with user, tenant, roles, service roles, domains and
    the detected auth version (0, 2 or 3).
    """
    if environ.get("HTTP_X_IDENTITY_STATUS") != "Confirmed":
        return
    identity = {
        "user": (
            environ.get("HTTP_X_USER_ID"),
            environ.get("HTTP_X_USER_NAME"),
        ),
        "tenant": (
            environ.get("HTTP_X_TENANT_ID"),
            environ.get("HTTP_X_TENANT_NAME"),
        ),
        "roles": list_from_csv(environ.get("HTTP_X_ROLES", "")),
        "service_roles": list_from_csv(
            environ.get("HTTP_X_SERVICE_ROLES", "")),
    }
    token_info = environ.get("keystone.token_info", {})
    if "access" in token_info:
        # v2 token: ignore any domain id headers that authtoken may have set
        auth_version = 2
        user_domain = project_domain = (None, None)
    elif "token" in token_info:
        auth_version = 3
        user_domain = (
            environ.get("HTTP_X_USER_DOMAIN_ID"),
            environ.get("HTTP_X_USER_DOMAIN_NAME"),
        )
        project_domain = (
            environ.get("HTTP_X_PROJECT_DOMAIN_ID"),
            environ.get("HTTP_X_PROJECT_DOMAIN_NAME"),
        )
    else:
        auth_version = 0
        user_domain = project_domain = (None, None)
    identity["user_domain"] = user_domain
    identity["project_domain"] = project_domain
    identity["auth_version"] = auth_version
    return identity
示例3: run_once
def run_once(self, *args, **kwargs):
    """One-shot replication pass; kwargs may restrict it via CSV lists."""
    self._zero_stats()
    self.logger.info(_('Running object replicator in script mode.'))
    # Empty/missing CSV options normalize to None, i.e. "replicate all".
    override_devices = list_from_csv(kwargs.get('devices')) or None
    override_partitions = list_from_csv(kwargs.get('partitions')) or None
    override_policies = list_from_csv(kwargs.get('policies')) or None
    self.replicate(override_devices=override_devices,
                   override_partitions=override_partitions,
                   override_policies=override_policies)
    minutes = (time.time() - self.stats['start']) / 60
    self.logger.info(
        _('Object replication complete (once). (%.02f minutes)'), minutes)
    # Restricted passes (by device or partition) must not clobber recon.
    if not (override_partitions or override_devices):
        replication_last = time.time()
        dump_recon_cache({'replication_stats': self.stats,
                          'replication_time': minutes,
                          'replication_last': replication_last,
                          'object_replication_time': minutes,
                          'object_replication_last': replication_last},
                         self.rcache, self.logger)
示例4: _integral_keystone_identity
def _integral_keystone_identity(self, environ):
    """Extract the identity from the Keystone auth component."""
    # Only trust identity headers that authtoken confirmed upstream.
    if environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed':
        return
    get = environ.get
    identity = {
        'user': (get('HTTP_X_USER_ID'), get('HTTP_X_USER_NAME')),
        'tenant': (get('HTTP_X_TENANT_ID'), get('HTTP_X_TENANT_NAME')),
        'roles': list_from_csv(get('HTTP_X_ROLES', '')),
        'service_roles': list_from_csv(get('HTTP_X_SERVICE_ROLES', '')),
    }
    token_info = get('keystone.token_info', {})
    auth_version = 0
    user_domain = project_domain = (None, None)
    if 'access' in token_info:
        # v2 token: ignore any domain id headers that authtoken may have set
        auth_version = 2
    elif 'token' in token_info:
        auth_version = 3
        user_domain = (get('HTTP_X_USER_DOMAIN_ID'),
                       get('HTTP_X_USER_DOMAIN_NAME'))
        project_domain = (get('HTTP_X_PROJECT_DOMAIN_ID'),
                          get('HTTP_X_PROJECT_DOMAIN_NAME'))
    identity['user_domain'] = user_domain
    identity['project_domain'] = project_domain
    identity['auth_version'] = auth_version
    return identity
示例5: run_once
def run_once(self, *args, **kwargs):
    """Run a single replication pass.

    Accepts optional ``devices``/``partitions``/``policies`` CSV kwargs to
    restrict the pass; an unrestricted pass would also dump recon stats.
    """
    start = time.time()
    self.logger.info(_("Running object replicator in script mode."))
    override_devices = list_from_csv(kwargs.get('devices'))
    override_partitions = list_from_csv(kwargs.get('partitions'))
    override_policies = list_from_csv(kwargs.get('policies'))
    # Empty CSV overrides mean "no restriction"; normalize to None.
    if not override_devices:
        override_devices = None
    if not override_partitions:
        override_partitions = None
    if not override_policies:
        override_policies = None
    ###################################### CHANGED_CODE ########################################################
    # NOTE(review): hard-coded override — every run replicates ONLY 'sda4',
    # silently discarding any devices the caller passed in. Because
    # override_devices is now always truthy, the recon-cache dump below can
    # never execute. Presumably a debugging hack; confirm before shipping.
    override_devices = ['sda4']
    ###################################### CHANGED_CODE ########################################################
    self.replicate(
        override_devices=override_devices,
        override_partitions=override_partitions,
        override_policies=override_policies)
    total = (time.time() - start) / 60
    self.logger.info(
        _("Object replication complete (once). (%.02f minutes)"), total)
    # Unreachable while the hard-coded override above is in place.
    if not (override_partitions or override_devices):
        dump_recon_cache({'object_replication_time': total,
                          'object_replication_last': time.time()},
                         self.rcache, self.logger)
示例6: __init__
def __init__(self, app, conf):
    """Configure the CNAME-lookup middleware from ``conf``."""
    if not MODULE_DEPENDENCY_MET:
        # reraise the exception if the dependency wasn't met
        raise ImportError("dnspython is required for this module")
    self.app = app
    storage_domain = conf.get("storage_domain", "example.com")
    domains = list_from_csv(storage_domain)
    # Every stored domain gets a leading dot; entries that already had one
    # are appended after the newly dotted ones.
    self.storage_domain = ["." + d for d in domains if not d.startswith(".")]
    self.storage_domain += [d for d in domains if d.startswith(".")]
    self.lookup_depth = int(conf.get("lookup_depth", "1"))
    self.memcache = None
    self.logger = get_logger(conf, log_route="cname-lookup")
示例7: run_once
def run_once(self, *args, **kwargs):
    """Single replication pass, optionally limited by devices/partitions."""
    start = time.time()
    self.logger.info(_("Running object replicator in script mode."))
    devices = list_from_csv(kwargs.get("devices"))
    partitions = list_from_csv(kwargs.get("partitions"))
    self.replicate(override_devices=devices,
                   override_partitions=partitions)
    total = (time.time() - start) / 60
    self.logger.info(
        _("Object replication complete (once). (%.02f minutes)"), total)
    # Record recon stats only when nothing was filtered out.
    if not (partitions or devices):
        dump_recon_cache(
            {"object_replication_time": total,
             "object_replication_last": time.time()},
            self.rcache, self.logger)
示例8: __init__
def __init__(self, app, conf):
    """Read storage domains and reseller prefixes from ``conf``."""
    self.app = app
    storage_domain = conf.get('storage_domain', 'example.com')
    parts = list_from_csv(storage_domain)
    # Undotted domains get a leading dot prepended; already-dotted entries
    # follow afterwards, unchanged.
    self.storage_domain = ['.' + s for s in parts if not s.startswith('.')]
    self.storage_domain += [s for s in parts if s.startswith('.')]
    self.path_root = '/' + conf.get('path_root', 'v1').strip('/')
    prefixes = conf.get('reseller_prefixes', 'AUTH')
    self.reseller_prefixes = list_from_csv(prefixes)
    # Pre-lowered copy for case-insensitive prefix matching.
    self.reseller_prefixes_lower = [p.lower()
                                    for p in self.reseller_prefixes]
    self.default_reseller_prefix = conf.get('default_reseller_prefix')
示例9: __init__
def __init__(self, app, conf):
    """Set up CNAME lookup; requires the optional dnspython dependency."""
    if not MODULE_DEPENDENCY_MET:
        # reraise the exception if the dependency wasn't met
        raise ImportError('dnspython is required for this module')
    self.app = app
    configured = list_from_csv(conf.get('storage_domain', 'example.com'))
    undotted = [s for s in configured if not s.startswith('.')]
    dotted = [s for s in configured if s.startswith('.')]
    # All stored domains carry a leading dot; undotted entries come first.
    self.storage_domain = ['.' + s for s in undotted] + dotted
    self.lookup_depth = int(conf.get('lookup_depth', '1'))
    self.memcache = None
    self.logger = get_logger(conf, log_route='cname-lookup')
示例10: main
def main():
    """Parse split-brain sub-commands and run each against a BrainSplitter.

    Returns an error string (used as the process exit message) on bad
    usage; otherwise prints server status and exits via sys.exit().
    """
    options, commands = parser.parse_args()
    # 'split-brain' is the tool's own sub-command name, not a brain command.
    commands.remove('split-brain')
    if not commands:
        parser.print_help()
        return 'ERROR: must specify at least one command'
    # Validate every command up front, before doing any work.
    for cmd_args in commands:
        cmd = cmd_args.split(':', 1)[0]
        if cmd not in BrainSplitter.__commands__:
            parser.print_help()
            return 'ERROR: unknown command %s' % cmd
    url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
                          'test:tester', 'testing')
    brain = BrainSplitter(url, token, options.container, options.object)
    for cmd_args in commands:
        parts = cmd_args.split(':', 1)
        command = parts[0]
        # Arguments ride after the first ':' as a CSV list.
        args = utils.list_from_csv(parts[1]) if len(parts) > 1 else ()
        try:
            brain.run(command, *args)
        except ClientException as e:
            # Best-effort: report the failure and keep executing commands.
            # print() call form is valid on both Python 2 and 3 (the
            # original used Py2-only print statements).
            print('**WARNING**: %s raised %s' % (command, e))
    print('STATUS'.join(['*' * 25] * 2))
    brain.servers.status()
    sys.exit()
示例11: reload_constraints
def reload_constraints():
    """
    Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS and EFFECTIVE_CONSTRAINTS along the way.
    """
    global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
    SWIFT_CONSTRAINTS_LOADED = False
    OVERRIDE_CONSTRAINTS = {}
    constraints_conf = ConfigParser()
    if constraints_conf.read(utils.SWIFT_CONF_FILE):
        SWIFT_CONSTRAINTS_LOADED = True
        for name in DEFAULT_CONSTRAINTS:
            try:
                raw = constraints_conf.get('swift-constraints', name)
            except NoSectionError:
                # We are never going to find the section for another
                # option, so stop looking entirely.
                break
            except NoOptionError:
                continue
            # Integer-valued options stay ints; anything else is CSV.
            try:
                OVERRIDE_CONSTRAINTS[name] = int(raw)
            except ValueError:
                OVERRIDE_CONSTRAINTS[name] = utils.list_from_csv(raw)
    for name, default in DEFAULT_CONSTRAINTS.items():
        effective = OVERRIDE_CONSTRAINTS.get(name, default)
        EFFECTIVE_CONSTRAINTS[name] = effective
        # Also expose each constraint as a module-level UPPER_CASE global
        # ("globals" in this context is module level globals, always).
        globals()[name.upper()] = effective
示例12: __init__
def __init__(self, conf, logger, zero_byte_only_at_fps=0):
    """Initialize an object auditor worker from ``conf``.

    A nonzero ``zero_byte_only_at_fps`` switches this worker to ZBF mode,
    auditing at that files-per-second cap instead of the configured one.
    """
    self.conf = conf
    self.logger = logger
    self.devices = conf.get("devices", "/srv/node")
    self.mount_check = config_true_value(conf.get("mount_check", "true"))
    self.max_files_per_second = float(conf.get("files_per_second", 20))
    self.max_bytes_per_second = float(conf.get("bytes_per_second", 10000000))
    self.zero_byte_only_at_fps = zero_byte_only_at_fps
    if zero_byte_only_at_fps:
        self.auditor_type = "ZBF"
        self.max_files_per_second = float(zero_byte_only_at_fps)
    else:
        self.auditor_type = "ALL"
    self.log_time = int(conf.get("log_time", 3600))
    self.files_running_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    self.passes = 0
    self.quarantines = 0
    self.errors = 0
    self.recon_cache_path = conf.get("recon_cache_path", "/var/cache/swift")
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    # Per-size statistics buckets; "OVER" counts objects larger than the
    # biggest configured size.
    sizes = [int(s) for s in list_from_csv(conf.get("object_size_stats"))]
    self.stats_sizes = sorted(sizes)
    self.stats_buckets = {s: 0 for s in self.stats_sizes + ["OVER"]}
示例13: __init__
def __init__(self, app, conf, logger=None):
    """Wire up proxy access logging from ``conf``."""
    self.app = app
    self.log_hdrs = config_true_value(
        conf.get("access_log_headers", conf.get("log_headers", "no")))
    self.log_hdrs_only = [
        h.title() for h in
        list_from_csv(conf.get("access_log_headers_only", ""))]
    # The leading access_* check is in case someone assumes that
    # log_statsd_valid_http_methods behaves like the other log_statsd_*
    # settings.
    methods_csv = conf.get(
        "access_log_statsd_valid_http_methods",
        conf.get("log_statsd_valid_http_methods",
                 "GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS"))
    self.valid_methods = [
        m.strip().upper() for m in methods_csv.split(",") if m.strip()]
    # Each logging option may be overridden by an access_-prefixed variant.
    access_log_conf = {}
    for key in ("log_facility", "log_name", "log_level", "log_udp_host",
                "log_udp_port", "log_statsd_host", "log_statsd_port",
                "log_statsd_default_sample_rate",
                "log_statsd_sample_rate_factor",
                "log_statsd_metric_prefix"):
        value = conf.get("access_" + key, conf.get(key, None))
        if value:
            access_log_conf[key] = value
    self.access_logger = logger or get_logger(
        access_log_conf, log_route="proxy-access")
    self.access_logger.set_statsd_prefix("proxy-server")
    self.reveal_sensitive_prefix = int(
        conf.get("reveal_sensitive_prefix", MAX_HEADER_SIZE))
示例14: __init__
def __init__(self, app, conf, logger=None):
    """Configure proxy access logging from ``conf``."""
    self.app = app
    self.log_hdrs = config_true_value(conf.get(
        'access_log_headers', conf.get('log_headers', 'no')))
    hdrs_only = list_from_csv(conf.get('access_log_headers_only', ''))
    self.log_hdrs_only = [x.title() for x in hdrs_only]
    # The leading access_* check is in case someone assumes that
    # log_statsd_valid_http_methods behaves like the other log_statsd_*
    # settings.
    raw_methods = conf.get(
        'access_log_statsd_valid_http_methods',
        conf.get('log_statsd_valid_http_methods',
                 'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS'))
    self.valid_methods = [
        method.strip().upper()
        for method in raw_methods.split(',') if method.strip()]
    access_log_conf = {}
    for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host',
                'log_udp_port', 'log_statsd_host', 'log_statsd_port',
                'log_statsd_default_sample_rate',
                'log_statsd_sample_rate_factor',
                'log_statsd_metric_prefix'):
        override = conf.get('access_' + key, conf.get(key, None))
        if override:
            access_log_conf[key] = override
    self.access_logger = logger or get_logger(access_log_conf,
                                              log_route='proxy-access')
    self.access_logger.set_statsd_prefix('proxy-server')
    self.reveal_sensitive_prefix = int(
        conf.get('reveal_sensitive_prefix', 16))
示例15: __init__
def __init__(self, conf, logger, zero_byte_only_at_fps=0):
    """Create an object auditor; a nonzero fps cap enables ZBF-only mode."""
    self.conf = conf
    self.logger = logger
    self.devices = conf.get('devices', '/srv/node')
    self.mount_check = config_true_value(conf.get('mount_check', 'true'))
    self.max_files_per_second = float(conf.get('files_per_second', 20))
    self.max_bytes_per_second = float(
        conf.get('bytes_per_second', 10000000))
    self.auditor_type = 'ALL'
    self.zero_byte_only_at_fps = zero_byte_only_at_fps
    if zero_byte_only_at_fps:
        # ZBF workers run at their own files-per-second rate limit.
        self.max_files_per_second = float(zero_byte_only_at_fps)
        self.auditor_type = 'ZBF'
    self.log_time = int(conf.get('log_time', 3600))
    self.files_running_time = 0
    self.bytes_running_time = 0
    self.bytes_processed = 0
    self.total_bytes_processed = 0
    self.total_files_processed = 0
    self.passes = 0
    self.quarantines = 0
    self.errors = 0
    self.recon_cache_path = conf.get('recon_cache_path',
                                     '/var/cache/swift')
    self.rcache = os.path.join(self.recon_cache_path, "object.recon")
    # Sorted size thresholds plus an "OVER" bucket for everything larger.
    self.stats_sizes = sorted(
        int(size) for size in list_from_csv(conf.get('object_size_stats')))
    self.stats_buckets = {size: 0 for size in self.stats_sizes + ['OVER']}