本文整理汇总了Python中swift.obj.diskfile.get_data_dir函数的典型用法代码示例。如果您正苦于以下问题:Python get_data_dir函数的具体用法?Python get_data_dir怎么用?Python get_data_dir使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_data_dir函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
def setUp(self):
    """Build a throwaway two-policy device tree under a temp dir and
    wire an ObjectReplicator plus a DiskFileManager against it."""
    # Pin the hash path suffix/prefix so partition layout is reproducible.
    utils.HASH_PATH_SUFFIX = 'endcap'
    utils.HASH_PATH_PREFIX = ''
    # Setup a test ring (stolen from common/test_ring.py)
    self.testdir = tempfile.mkdtemp()
    self.devices = os.path.join(self.testdir, 'node')
    rmtree(self.testdir, ignore_errors=1)
    for new_dir in (self.testdir, self.devices,
                    os.path.join(self.devices, 'sda')):
        os.mkdir(new_dir)
    self.objects = os.path.join(self.devices, 'sda',
                                diskfile.get_data_dir(0))
    self.objects_1 = os.path.join(self.devices, 'sda',
                                  diskfile.get_data_dir(1))
    os.mkdir(self.objects)
    os.mkdir(self.objects_1)
    self.parts = {}
    self.parts_1 = {}
    for part in ['0', '1', '2', '3']:
        part_path = os.path.join(self.objects, part)
        self.parts[part] = part_path
        os.mkdir(part_path)
        part_path_1 = os.path.join(self.objects_1, part)
        self.parts_1[part] = part_path_1
        os.mkdir(part_path_1)
    _create_test_rings(self.testdir)
    self.conf = dict(
        swift_dir=self.testdir, devices=self.devices, mount_check='false',
        timeout='300', stats_interval='1')
    self.replicator = object_replicator.ObjectReplicator(self.conf)
    self.replicator.logger = FakeLogger()
    self.df_mgr = diskfile.DiskFileManager(self.conf,
                                           self.replicator.logger)
示例2: setUp
def setUp(self):
    """Create a two-device, two-policy object tree on disk and build a
    DiskFileManager plus one diskfile per policy against it."""
    self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
    self.devices = os.path.join(self.testdir, 'node')
    self.rcache = os.path.join(self.testdir, 'object.recon')
    self.logger = FakeLogger()
    rmtree(self.testdir, ignore_errors=1)
    mkdirs(os.path.join(self.devices, 'sda'))
    os.mkdir(os.path.join(self.devices, 'sdb'))
    # policy 0
    self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
    self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
    os.mkdir(self.objects)
    # policy 1
    self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
    self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
    os.mkdir(self.objects_p1)
    # BUG FIX: the original bound self.parts and self.parts_p1 to the
    # *same* dict object, so the policy-1 path assigned under each key
    # overwrote the policy-0 path.  Keep two independent dicts.
    self.parts = {}
    self.parts_p1 = {}
    for part in ['0', '1', '2', '3']:
        self.parts[part] = os.path.join(self.objects, part)
        self.parts_p1[part] = os.path.join(self.objects_p1, part)
        os.mkdir(os.path.join(self.objects, part))
        os.mkdir(os.path.join(self.objects_p1, part))
    self.conf = dict(
        devices=self.devices,
        mount_check='false',
        object_size_stats='10,100,1024,10240')
    self.df_mgr = DiskFileManager(self.conf, self.logger)
    # diskfiles for policy 0, 1
    self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
    self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
                                                 'o', 1)
示例3: setUp
def setUp(self):
    """Create a two-device, two-policy object tree on disk and build a
    DiskFileManager plus one diskfile per POLICIES entry against it."""
    self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
    self.devices = os.path.join(self.testdir, "node")
    self.rcache = os.path.join(self.testdir, "object.recon")
    self.logger = FakeLogger()
    rmtree(self.testdir, ignore_errors=1)
    mkdirs(os.path.join(self.devices, "sda"))
    os.mkdir(os.path.join(self.devices, "sdb"))
    # policy 0
    self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
    self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
    os.mkdir(self.objects)
    # policy 1
    self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
    self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
    os.mkdir(self.objects_p1)
    # BUG FIX: self.parts and self.parts_p1 used to alias one shared
    # dict, so the policy-1 paths silently replaced the policy-0
    # entries under the same keys.  Use two independent dicts.
    self.parts = {}
    self.parts_p1 = {}
    for part in ["0", "1", "2", "3"]:
        self.parts[part] = os.path.join(self.objects, part)
        self.parts_p1[part] = os.path.join(self.objects_p1, part)
        os.mkdir(os.path.join(self.objects, part))
        os.mkdir(os.path.join(self.objects_p1, part))
    self.conf = dict(devices=self.devices, mount_check="false",
                     object_size_stats="10,100,1024,10240")
    self.df_mgr = DiskFileManager(self.conf, self.logger)
    # diskfiles for policy 0, 1
    self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o",
                                              policy=POLICIES[0])
    self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o",
                                                 policy=POLICIES[1])
示例4: rsync
def rsync(self, node, job, suffixes):
    """
    Uses rsync to implement the sync method. This was the first
    sync method in Swift.

    :returns: True when the rsync subprocess exits successfully,
              False when the job path or every suffix is missing, or
              when rsync fails.
    """
    if not os.path.exists(job["path"]):
        return False
    args = [
        "rsync",
        "--recursive",
        "--whole-file",
        "--human-readable",
        "--xattrs",
        "--itemize-changes",
        "--ignore-existing",
        "--timeout=%s" % self.rsync_io_timeout,
        "--contimeout=%s" % self.rsync_io_timeout,
        "--bwlimit=%s" % self.rsync_bwlimit,
    ]
    node_ip = rsync_ip(node["replication_ip"])
    if self.vm_test_mode:
        rsync_module = "%s::object%s" % (node_ip, node["replication_port"])
    else:
        rsync_module = "%s::object" % node_ip
    # Only ship suffix dirs that actually exist on disk.
    candidates = (join(job["path"], suffix) for suffix in suffixes)
    existing = [spath for spath in candidates if os.path.exists(spath)]
    if not existing:
        return False
    args.extend(existing)
    data_dir = get_data_dir(job["policy_idx"])
    args.append(join(rsync_module, node["device"], data_dir,
                     job["partition"]))
    return self._rsync(args) == 0
示例5: storage_dir
def storage_dir(self, server, node, part=None, policy=None):
    """Return the on-disk objects directory for *node* under the given
    (or default) policy, optionally narrowed to one partition."""
    # Falsy ``policy`` falls back to the instance default, matching the
    # original ``policy or self.policy`` semantics.
    active_policy = policy or self.policy
    base = os.path.join(self.device_dir(server, node),
                        get_data_dir(active_policy))
    if part is None:
        return base
    return os.path.join(base, str(part))
示例6: rsync
def rsync(self, node, job, suffixes):
    """
    Uses rsync to implement the sync method. This was the first
    sync method in Swift.

    :returns: (success, {}) — success is True only when rsync exits 0.
    """
    if not os.path.exists(job["path"]):
        return False, {}
    args = [
        "rsync",
        "--recursive",
        "--whole-file",
        "--human-readable",
        "--xattrs",
        "--itemize-changes",
        "--ignore-existing",
        "--timeout=%s" % self.rsync_io_timeout,
        "--contimeout=%s" % self.rsync_io_timeout,
        "--bwlimit=%s" % self.rsync_bwlimit,
    ]
    # Compression costs CPU, so only enable it when the remote node
    # lives in a different region than the local one.
    if self.rsync_compress and job["region"] != node["region"]:
        args.append("--compress")
    rsync_module = rsync_module_interpolation(self.rsync_module, node)
    # Only ship suffix dirs that actually exist on disk.
    candidates = (join(job["path"], suffix) for suffix in suffixes)
    existing = [spath for spath in candidates if os.path.exists(spath)]
    if not existing:
        return False, {}
    args.extend(existing)
    data_dir = get_data_dir(job["policy"])
    args.append(join(rsync_module, node["device"], data_dir,
                     job["partition"]))
    return self._rsync(args) == 0, {}
示例7: setup_bad_zero_byte
def setup_bad_zero_byte(self, with_ts=False):
    """
    Write a diskfile whose metadata claims Content-Length 10 while the
    body is empty, so the auditor should flag it.

    :param with_ts: when True, also plant a tombstone (.ts) file for
                    the same object name
    :returns: path of the tombstone file, or '' when with_ts is False
    """
    self.auditor = auditor.ObjectAuditor(self.conf)
    self.auditor.log_time = 0
    ts_file_path = ''
    if with_ts:
        name_hash = hash_path('a', 'c', 'o')
        dir_path = os.path.join(
            self.devices, 'sda',
            storage_directory(get_data_dir(0), '0', name_hash))
        ts_file_path = os.path.join(dir_path, '99999.ts')
        if not os.path.exists(dir_path):
            mkdirs(dir_path)
        # BUG FIX: use a context manager so the file descriptor is
        # closed even if write_metadata raises (the original left it
        # open on error).
        with open(ts_file_path, 'w') as fp:
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})

    with self.disk_file.create() as writer:
        # ETag of an empty body: nothing is ever written through
        # ``writer``, so the md5 of zero bytes is the "correct" etag
        # while Content-Length=10 is the lie the auditor must catch.
        etag = md5().hexdigest()
        metadata = {
            'ETag': etag,
            'X-Timestamp': str(normalize_timestamp(time.time())),
            'Content-Length': 10,
        }
        writer.put(metadata)
        # Rewrite metadata directly on the fd with a fresh empty-body
        # etag, mirroring the original double-write.
        etag = md5().hexdigest()
        metadata['ETag'] = etag
        write_metadata(writer._fd, metadata)
    return ts_file_path
示例8: object_key
def object_key(policy_index, hashpath, timestamp='',
               extension='.data', nounce=''):
    """Compose the flat key under which an object record is stored.

    With no timestamp the key ends in '/' so it can serve as a prefix
    for getPrevious-style scans.  (NOTE: ``nounce`` is a long-standing
    misspelling of "nonce"; kept since callers may pass it by keyword.)
    """
    storage_policy = diskfile.get_data_dir(policy_index)
    if not timestamp:
        # prefix form, for use with getPrevious
        return '%s.%s/' % (storage_policy, hashpath)
    return '%s.%s.%s%s.%s' % (storage_policy, hashpath, timestamp,
                              extension, nounce)
示例9: collect_parts
def collect_parts(self, override_devices=None, override_partitions=None):
    """
    Helper for yielding partitions in the top level reconstructor

    :param override_devices: optional list of device names; when
                             non-empty, only these devices are scanned
    :param override_partitions: optional list of partition numbers
                                (ints); when non-empty, only these
                                partitions are yielded
    :yields: dicts with keys local_dev, policy, partition, part_path
    """
    # Treat None as "no filter".
    override_devices = override_devices or []
    override_partitions = override_partitions or []
    ips = whataremyips(self.bind_ip)
    for policy in POLICIES:
        # Only erasure-coded policies are reconstructed.
        if policy.policy_type != EC_POLICY:
            continue
        self._diskfile_mgr = self._df_router[policy]
        self.load_object_ring(policy)
        data_dir = get_data_dir(policy)
        # Devices from this policy's ring that live on this node,
        # matched by replication ip/port.  itertools.ifilter is
        # Python-2-only; this code predates a py3 port.
        local_devices = itertools.ifilter(
            lambda dev: dev and is_local_device(ips, self.port, dev["replication_ip"], dev["replication_port"]),
            policy.object_ring.devs,
        )
        for local_dev in local_devices:
            if override_devices and (local_dev["device"] not in override_devices):
                continue
            dev_path = self._df_router[policy].get_dev_path(local_dev["device"])
            if not dev_path:
                # get_dev_path returned a falsy value, which this code
                # reports as an unmounted device.
                self.logger.warn(_("%s is not mounted"), local_dev["device"])
                continue
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            # Reap temp files older than the reclaim age before
            # scanning for partitions.
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception("Unable to create %s" % obj_path)
                # A freshly created (or uncreatable) data dir has no
                # partitions to yield.
                continue
            try:
                partitions = os.listdir(obj_path)
            except OSError:
                self.logger.exception("Unable to list partitions in %r" % obj_path)
                continue
            for partition in partitions:
                part_path = join(obj_path, partition)
                if not (partition.isdigit() and os.path.isdir(part_path)):
                    # Anything that is not a numeric directory does not
                    # belong in the data dir; remove it.
                    self.logger.warning("Unexpected entity in data dir: %r" % part_path)
                    remove_file(part_path)
                    continue
                partition = int(partition)
                if override_partitions and (partition not in override_partitions):
                    continue
                part_info = {
                    "local_dev": local_dev,
                    "policy": policy,
                    "partition": partition,
                    "part_path": part_path,
                }
                yield part_info
示例10: object_key
def object_key(policy, hashpath, timestamp="", extension=".data", nonce="", frag_index=None):
    """Compose the flat key under which an object record is stored.

    With no timestamp the key ends in '/' so it can serve as a prefix
    for getPrevious-style scans; otherwise the key carries timestamp,
    extension, nonce and an optional '-<frag_index>' trailer.
    """
    storage_policy = diskfile.get_data_dir(policy)
    if not timestamp:
        # prefix form, for use with getPrevious
        return "%s.%s/" % (storage_policy, hashpath)
    frag_trailer = "" if frag_index is None else "-%s" % frag_index
    return "%s.%s.%s%s.%s%s" % (storage_policy, hashpath, timestamp,
                                extension, nonce, frag_trailer)
示例11: process_repl
def process_repl(self, policy, ips, override_devices=None,
                 override_partitions=None):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy

    :returns: list of job dicts, one per (local device, partition)
    """
    jobs = []
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    for local_dev in obj_ring.devs:
        # Skip ring holes, remote devices, and non-overridden devices.
        if not local_dev:
            continue
        if not is_local_device(ips, self.port,
                               local_dev['replication_ip'],
                               local_dev['replication_port']):
            continue
        if (override_devices is not None
                and local_dev['device'] not in override_devices):
            continue
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        # Reap temp files older than the reclaim age.
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            # A fresh (or uncreatable) data dir has no partitions.
            continue
        for partition in os.listdir(obj_path):
            if (override_partitions is not None
                    and partition not in override_partitions):
                continue
            try:
                job_path = join(obj_path, partition)
                part_nodes = obj_ring.get_part_nodes(int(partition))
                # Every other node that holds this partition.
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         obj_path=obj_path,
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy_idx=policy.idx,
                         partition=partition,
                         object_ring=obj_ring,
                         region=local_dev['region']))
            except ValueError:
                # non-numeric partition name
                continue
    return jobs
示例12: _setup_data_file
def _setup_data_file(self, container, obj, data):
    """Upload *data* into a policy-pinned container and locate the
    object's on-disk data file.

    :returns: (primary node, partition, path to the data file)
    """
    client.put_container(self.url, self.token, container,
                         headers={"X-Storage-Policy": self.policy.name})
    client.put_object(self.url, self.token, container, obj, data)
    fetched = client.get_object(self.url, self.token, container, obj)[-1]
    self.assertEquals(fetched, data)
    opart, onodes = self.object_ring.get_nodes(self.account, container, obj)
    onode = onodes[0]
    # Derive the node index from the port number (ports spaced 10
    # apart starting at 6010 — assumes the standard test layout).
    node_id = (onode["port"] - 6000) / 10
    obj_server_conf = readconf(self.configs["object-server"][node_id])
    devices = obj_server_conf["app:object-server"]["devices"]
    name_hash = hash_path(self.account, container, obj)
    obj_dir = "%s/%s/%s/%s/%s/%s/" % (devices, onode["device"],
                                      get_data_dir(self.policy),
                                      opart, name_hash[-3:], name_hash)
    data_file = get_data_file_path(obj_dir)
    return onode, opart, data_file
示例13: process_repl
def process_repl(self, policy, jobs, ips):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy

    :param policy: storage policy whose ring is scanned
    :param jobs: list that job dicts are appended to (mutated in place;
                 this method returns None)
    :param ips: this node's IPs, used to pick local devices
    """
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    # Only devices in this policy's ring whose replication ip/port
    # match this node.
    for local_dev in [dev for dev in obj_ring.devs
                      if dev and dev['replication_ip'] in ips and
                      dev['replication_port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        # Reap temp files older than the reclaim age.
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            # A freshly created (or uncreatable) data dir has no
            # partitions to build jobs for.
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning(
                        'Removing partition directory '
                        'which was a file: %s', job_path)
                    os.remove(job_path)
                    continue
                part_nodes = obj_ring.get_part_nodes(int(partition))
                # Every other node that holds this partition.
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         nodes=nodes,
                         # True when no node was filtered out above,
                         # i.e. this device is not one of the
                         # partition's primaries (a handoff copy).
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy_idx=policy.idx,
                         partition=partition,
                         object_ring=obj_ring))
            except (ValueError, OSError):
                # non-numeric partition name, or a filesystem error
                # while inspecting/removing the entry — skip it
                continue
示例14: _setup_data_file
def _setup_data_file(self, container, obj, data):
    """Upload *data* as container/obj and locate the object's on-disk
    data file.

    :returns: (primary node, partition, path to the data file)
    """
    client.put_container(self.url, self.token, container)
    client.put_object(self.url, self.token, container, obj, data)
    fetched = client.get_object(self.url, self.token, container, obj)[-1]
    self.assertEquals(fetched, data)
    opart, onodes = self.object_ring.get_nodes(
        self.account, container, obj)
    onode = onodes[0]
    # Derive the node index from the port number (ports spaced 10
    # apart starting at 6010 — assumes the standard test layout).
    node_id = (onode['port'] - 6000) / 10
    obj_server_conf = readconf(self.configs['object-server'][node_id])
    devices = obj_server_conf['app:object-server']['devices']
    name_hash = hash_path(self.account, container, obj)
    obj_dir = '%s/%s/%s/%s/%s/%s/' % (devices, onode['device'],
                                      get_data_dir(self.policy.idx),
                                      opart, name_hash[-3:], name_hash)
    data_file = get_data_file_path(obj_dir)
    return onode, opart, data_file
示例15: process_repl
def process_repl(self, policy, jobs, ips):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy

    Appends one job dict per (local device, partition) to *jobs*.
    """
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    for local_dev in obj_ring.devs:
        # Skip ring holes and devices that are not local to this node.
        if not local_dev:
            continue
        if local_dev["replication_ip"] not in ips:
            continue
        if local_dev["replication_port"] != self.port:
            continue
        dev_path = join(self.devices_dir, local_dev["device"])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_("%s is not mounted"), local_dev["device"])
            continue
        # Reap temp files older than the reclaim age.
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception("ERROR creating %s" % obj_path)
            # A fresh (or uncreatable) data dir has no partitions.
            continue
        for partition in os.listdir(obj_path):
            job_path = join(obj_path, partition)
            try:
                part_nodes = obj_ring.get_part_nodes(int(partition))
            except ValueError:
                # non-numeric partition name
                continue
            # Every other node that holds this partition.
            peers = [node for node in part_nodes
                     if node["id"] != local_dev["id"]]
            jobs.append(
                dict(
                    path=job_path,
                    device=local_dev["device"],
                    nodes=peers,
                    delete=len(peers) > len(part_nodes) - 1,
                    policy_idx=policy.idx,
                    partition=partition,
                    object_ring=obj_ring,
                )
            )