This page collects typical usage examples of the Python function swift.common.utils.unlink_older_than. If you are unsure what unlink_older_than does, how to call it, or want to see it used in context, the curated code samples below may help.
Fifteen code examples of unlink_older_than are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python samples.
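Before the examples, a quick orientation: unlink_older_than(path, mtime) deletes the files directly under path whose last-modified time is older than mtime. Every example below points it at a device's tmp directory to reclaim temporary files abandoned longer than reclaim_age. A minimal sketch of that shared call pattern follows; the path and the one-week reclaim_age are illustrative values, not taken from any example:

import time

from swift.common.utils import unlink_older_than

# Illustrative values; the real daemons read these from config and the ring.
tmp_path = '/srv/node/sdb1/tmp'   # hypothetical device tmp directory
reclaim_age = 7 * 24 * 60 * 60    # one week, in seconds

# Remove files under tmp_path last modified more than reclaim_age
# seconds ago; newer entries are left untouched.
unlink_older_than(tmp_path, time.time() - reclaim_age)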
Example 1: run_once
def run_once(self, *args, **kwargs):
    """Run a replication pass once."""
    self._zero_stats()
    dirs = []
    ips = whataremyips()
    if not ips:
        self.logger.error(_('ERROR Failed to get my own IPs?'))
        return
    for node in self.ring.devs:
        if node and node['ip'] in ips and node['port'] == self.port:
            if self.mount_check and not os.path.ismount(
                    os.path.join(self.root, node['device'])):
                self.logger.warn(
                    _('Skipping %(device)s as it is not mounted') % node)
                continue
            unlink_older_than(
                os.path.join(self.root, node['device'], 'tmp'),
                time.time() - self.reclaim_age)
            datadir = os.path.join(self.root, node['device'], self.datadir)
            if os.path.isdir(datadir):
                dirs.append((datadir, node['id']))
    self.logger.info(_('Beginning replication run'))
    for part, object_file, node_id in self.roundrobin_datadirs(dirs):
        self.cpool.spawn_n(
            self._replicate_object, part, object_file, node_id)
    self.cpool.waitall()
    self.logger.info(_('Replication run OVER'))
    self._report_stats()
Example 2: collect_jobs
def collect_jobs(self):
    """
    Returns a sorted list of jobs (dictionaries) that specify the
    partitions, nodes, etc to be rsynced.
    """
    jobs = []
    ips = whataremyips()
    for local_dev in [dev for dev in self.object_ring.devs
                      if dev and dev['ip'] in ips and dev['port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, 'objects')
        tmp_path = join(dev_path, 'tmp')
        if self.mount_check and not os.path.ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            continue
        for partition in os.listdir(obj_path):
            try:
                nodes = [node for node in
                         self.object_ring.get_part_nodes(int(partition))
                         if node['id'] != local_dev['id']]
                jobs.append(dict(path=join(obj_path, partition),
                                 nodes=nodes,
                                 delete=len(nodes) > self.object_ring.replica_count - 1,
                                 partition=partition))
            except ValueError:
                continue
    random.shuffle(jobs)
    # Partitions that need to be deleted take priority
    jobs.sort(key=lambda job: not job['delete'])
    self.job_count = len(jobs)
    return jobs
Example 3: collect_jobs
def collect_jobs(self):
    """
    Returns a sorted list of jobs (dictionaries) that specify the
    partitions, nodes, etc to be synced.
    """
    jobs = []
    ips = whataremyips()
    for local_dev in [dev for dev in self.object_ring.devs
                      if dev and dev['replication_ip'] in ips and
                      dev['replication_port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, 'objects')
        tmp_path = join(dev_path, 'tmp')
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning('Removing partition directory '
                                        'which was a file: %s', job_path)
                    os.remove(job_path)
                    continue
                part_nodes = \
                    self.object_ring.get_part_nodes(int(partition))
                # MODIFIED LightSync
                for mypos in range(len(part_nodes)):
                    if part_nodes[mypos]['id'] == local_dev['id']:
                        break
                nodes = part_nodes[mypos + 1:] + part_nodes[:mypos]
                ##
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         partition=partition))
            except (ValueError, OSError):
                continue
    random.shuffle(jobs)
    if self.handoffs_first:
        # Move the handoff parts to the front of the list
        jobs.sort(key=lambda job: not job['delete'])
    self.job_count = len(jobs)
    return jobs
Example 4: collect_parts
def collect_parts(self, override_devices=None, override_partitions=None):
    """
    Helper for yielding partitions in the top level reconstructor
    """
    override_devices = override_devices or []
    override_partitions = override_partitions or []
    ips = whataremyips(self.bind_ip)
    for policy in POLICIES:
        if policy.policy_type != EC_POLICY:
            continue
        self._diskfile_mgr = self._df_router[policy]
        self.load_object_ring(policy)
        data_dir = get_data_dir(policy)
        local_devices = itertools.ifilter(
            lambda dev: dev and is_local_device(
                ips, self.port,
                dev["replication_ip"], dev["replication_port"]),
            policy.object_ring.devs,
        )
        for local_dev in local_devices:
            if override_devices and (local_dev["device"] not in override_devices):
                continue
            dev_path = self._df_router[policy].get_dev_path(local_dev["device"])
            if not dev_path:
                self.logger.warn(_("%s is not mounted"), local_dev["device"])
                continue
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception("Unable to create %s" % obj_path)
                continue
            try:
                partitions = os.listdir(obj_path)
            except OSError:
                self.logger.exception("Unable to list partitions in %r" % obj_path)
                continue
            for partition in partitions:
                part_path = join(obj_path, partition)
                if not (partition.isdigit() and os.path.isdir(part_path)):
                    self.logger.warning("Unexpected entity in data dir: %r" % part_path)
                    remove_file(part_path)
                    continue
                partition = int(partition)
                if override_partitions and (partition not in override_partitions):
                    continue
                part_info = {
                    "local_dev": local_dev,
                    "policy": policy,
                    "partition": partition,
                    "part_path": part_path,
                }
                yield part_info
Example 5: process_repl
def process_repl(self, policy, ips, override_devices=None,
                 override_partitions=None):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy
    """
    jobs = []
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    for local_dev in [dev for dev in obj_ring.devs
                      if (dev
                          and is_local_device(ips,
                                              self.port,
                                              dev['replication_ip'],
                                              dev['replication_port'])
                          and (override_devices is None
                               or dev['device'] in override_devices))]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            if (override_partitions is not None
                    and partition not in override_partitions):
                continue
            try:
                job_path = join(obj_path, partition)
                part_nodes = obj_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         obj_path=obj_path,
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy_idx=policy.idx,
                         partition=partition,
                         object_ring=obj_ring,
                         region=local_dev['region']))
            except ValueError:
                continue
    return jobs
Example 6: collect_jobs
def collect_jobs(self):
    """
    Returns a sorted list of jobs (dictionaries) that specify the
    partitions, nodes, etc to be synced.
    """
    jobs = []
    ips = whataremyips()
    for local_dev in [
        dev
        for dev in self.object_ring.devs
        if dev and dev["replication_ip"] in ips and dev["replication_port"] == self.port
    ]:
        dev_path = join(self.devices_dir, local_dev["device"])
        obj_path = join(dev_path, "objects")
        tmp_path = join(dev_path, "tmp")
        if self.mount_check and not os.path.ismount(dev_path):
            self.logger.warn(_("%s is not mounted"), local_dev["device"])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception("ERROR creating %s" % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning("Removing partition directory "
                                        "which was a file: %s", job_path)
                    os.remove(job_path)
                    continue
                part_nodes = self.object_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                jobs.append(
                    dict(
                        path=job_path,
                        device=local_dev["device"],
                        nodes=nodes,
                        delete=len(nodes) > len(part_nodes) - 1,
                        partition=partition,
                    )
                )
            except (ValueError, OSError):
                continue
    random.shuffle(jobs)
    if self.handoffs_first:
        # Move the handoff parts to the front of the list
        jobs.sort(key=lambda job: not job["delete"])
    self.job_count = len(jobs)
    return jobs
Example 7: process_repl
def process_repl(self, policy, jobs, ips):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy
    """
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    for local_dev in [dev for dev in obj_ring.devs
                      if dev and dev['replication_ip'] in ips and
                      dev['replication_port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning(
                        'Removing partition directory '
                        'which was a file: %s', job_path)
                    os.remove(job_path)
                    continue
                part_nodes = obj_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy_idx=policy.idx,
                         partition=partition,
                         object_ring=obj_ring))
            except (ValueError, OSError):
                continue
Example 8: collect_jobs
def collect_jobs(self):
    """
    Returns a sorted list of jobs (dictionaries) that specify the
    partitions, nodes, etc to be rsynced.
    """
    jobs = []
    ips = whataremyips()
    for local_dev in [dev for dev in self.object_ring.devs
                      if dev and dev['ip'] in ips and
                      dev['port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, 'objects')
        tmp_path = join(dev_path, 'tmp')
        if self.mount_check and not os.path.ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning('Removing partition directory '
                                        'which was a file: %s', job_path)
                    os.remove(job_path)
                    continue
                part_nodes = \
                    self.object_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         partition=partition))
            except (ValueError, OSError):
                continue
    random.shuffle(jobs)
    self.job_count = len(jobs)
    return jobs
Example 9: process_repl
def process_repl(self, policy, jobs, ips):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy
    """
    obj_ring = self.get_object_ring(policy.idx)
    data_dir = get_data_dir(policy.idx)
    for local_dev in [
        dev
        for dev in obj_ring.devs
        if dev and dev["replication_ip"] in ips and dev["replication_port"] == self.port
    ]:
        dev_path = join(self.devices_dir, local_dev["device"])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(int(policy)))
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_("%s is not mounted"), local_dev["device"])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception("ERROR creating %s" % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                part_nodes = obj_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                jobs.append(
                    dict(
                        path=job_path,
                        device=local_dev["device"],
                        nodes=nodes,
                        delete=len(nodes) > len(part_nodes) - 1,
                        policy_idx=policy.idx,
                        partition=partition,
                        object_ring=obj_ring,
                    )
                )
            except ValueError:
                continue
Example 10: run_once
def run_once(self, *args, **kwargs):
    """Run a replication pass once."""
    self._zero_stats()
    dirs = []
    ips = whataremyips(self.bind_ip)
    if not ips:
        self.logger.error(_('ERROR Failed to get my own IPs?'))
        return
    self._local_device_ids = set()
    found_local = False
    for node in self.ring.devs:
        if node and is_local_device(ips, self.port,
                                    node['replication_ip'],
                                    node['replication_port']):
            found_local = True
            if self.mount_check and not ismount(
                    os.path.join(self.root, node['device'])):
                self._add_failure_stats(
                    [(failure_dev['replication_ip'],
                      failure_dev['device'])
                     for failure_dev in self.ring.devs if failure_dev])
                self.logger.warning(
                    _('Skipping %(device)s as it is not mounted') % node)
                continue
            unlink_older_than(
                os.path.join(self.root, node['device'], 'tmp'),
                time.time() - self.reclaim_age)
            datadir = os.path.join(self.root, node['device'], self.datadir)
            if os.path.isdir(datadir):
                self._local_device_ids.add(node['id'])
                dirs.append((datadir, node['id']))
    if not found_local:
        self.logger.error("Can't find itself %s with port %s in ring "
                          "file, not replicating",
                          ", ".join(ips), self.port)
    self.logger.info(_('Beginning replication run'))
    for part, object_file, node_id in roundrobin_datadirs(dirs):
        self.cpool.spawn_n(
            self._replicate_object, part, object_file, node_id)
    self.cpool.waitall()
    self.logger.info(_('Replication run OVER'))
    self._report_stats()
Example 11: build_replication_jobs
def build_replication_jobs(self, policy, ips, override_devices=None,
                           override_partitions=None):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy
    """
    jobs = []
    df_mgr = self._df_router[policy]
    self.all_devs_info.update(
        [(dev['replication_ip'], dev['device'])
         for dev in policy.object_ring.devs if dev])
    data_dir = get_data_dir(policy)
    found_local = False
    for local_dev in [dev for dev in policy.object_ring.devs
                      if (dev
                          and is_local_device(ips,
                                              self.port,
                                              dev['replication_ip'],
                                              dev['replication_port'])
                          and (override_devices is None
                               or dev['device'] in override_devices))]:
        found_local = True
        dev_path = check_drive(self.devices_dir, local_dev['device'],
                               self.mount_check)
        if not dev_path:
            self._add_failure_stats(
                [(failure_dev['replication_ip'],
                  failure_dev['device'])
                 for failure_dev in policy.object_ring.devs
                 if failure_dev])
            self.logger.warning(
                _('%s is not mounted'), local_dev['device'])
            continue
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(policy))
        unlink_older_than(tmp_path, time.time() -
                          df_mgr.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            if (override_partitions is not None
                    and partition not in override_partitions):
                continue
            if (partition.startswith('auditor_status_') and
                    partition.endswith('.json')):
                # ignore auditor status files
                continue
            part_nodes = None
            try:
                job_path = join(obj_path, partition)
                part_nodes = policy.object_ring.get_part_nodes(
                    int(partition))
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         obj_path=obj_path,
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         policy=policy,
                         partition=partition,
                         region=local_dev['region']))
            except ValueError:
                if part_nodes:
                    self._add_failure_stats(
                        [(failure_dev['replication_ip'],
                          failure_dev['device'])
                         for failure_dev in nodes])
                else:
                    self._add_failure_stats(
                        [(failure_dev['replication_ip'],
                          failure_dev['device'])
                         for failure_dev in policy.object_ring.devs
                         if failure_dev])
                continue
    if not found_local:
        self.logger.error("Can't find itself in policy with index %d with"
                          " ips %s and with port %s in ring file, not"
                          " replicating",
                          int(policy), ", ".join(ips), self.port)
    return jobs
Example 12: collect_parts
def collect_parts(self, override_devices=None,
                  override_partitions=None):
    """
    Helper for yielding partitions in the top level reconstructor
    """
    override_devices = override_devices or []
    override_partitions = override_partitions or []
    ips = whataremyips(self.bind_ip)
    for policy in POLICIES:
        if policy.policy_type != EC_POLICY:
            continue
        self._diskfile_mgr = self._df_router[policy]
        self.load_object_ring(policy)
        data_dir = get_data_dir(policy)
        local_devices = list(six.moves.filter(
            lambda dev: dev and is_local_device(
                ips, self.port,
                dev['replication_ip'], dev['replication_port']),
            policy.object_ring.devs))
        if override_devices:
            self.device_count = len(override_devices)
        else:
            self.device_count = len(local_devices)
        for local_dev in local_devices:
            if override_devices and (local_dev['device'] not in
                                     override_devices):
                continue
            self.reconstruction_device_count += 1
            dev_path = self._df_router[policy].get_dev_path(
                local_dev['device'])
            if not dev_path:
                self.logger.warning(_('%s is not mounted'),
                                    local_dev['device'])
                continue
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(int(policy)))
            unlink_older_than(tmp_path, time.time() -
                              self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception(
                        'Unable to create %s' % obj_path)
                continue
            try:
                partitions = os.listdir(obj_path)
            except OSError:
                self.logger.exception(
                    'Unable to list partitions in %r' % obj_path)
                continue
            self.part_count += len(partitions)
            for partition in partitions:
                part_path = join(obj_path, partition)
                if not (partition.isdigit() and
                        os.path.isdir(part_path)):
                    self.logger.warning(
                        'Unexpected entity in data dir: %r' % part_path)
                    remove_file(part_path)
                    self.reconstruction_part_count += 1
                    continue
                partition = int(partition)
                if override_partitions and (partition not in
                                            override_partitions):
                    continue
                part_info = {
                    'local_dev': local_dev,
                    'policy': policy,
                    'partition': partition,
                    'part_path': part_path,
                }
                yield part_info
                self.reconstruction_part_count += 1
Example 13: collect_jobs
def collect_jobs(self):
    """
    Returns a sorted list of jobs (dictionaries) that specify the
    partitions, nodes, etc to be synced.
    """
    jobs = []
    ips = whataremyips()
    for local_dev in [dev for dev in self.object_ring.devs
                      if dev and dev['replication_ip'] in ips and
                      dev['replication_port'] == self.port]:
        dev_path = join(self.devices_dir, local_dev['device'])
        obj_path = join(dev_path, 'objects')
        tmp_path = join(dev_path, 'tmp')
        if self.mount_check and not ismount(dev_path):
            self.logger.warn(_('%s is not mounted'), local_dev['device'])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception('ERROR creating %s' % obj_path)
            continue
        for partition in os.listdir(obj_path):
            try:
                job_path = join(obj_path, partition)
                if isfile(job_path):
                    # Clean up any (probably zero-byte) files where a
                    # partition should be.
                    self.logger.warning('Removing partition directory '
                                        'which was a file: %s', job_path)
                    os.remove(job_path)
                    continue
                part_nodes = \
                    self.object_ring.get_part_nodes(int(partition))
                #### CHANGED CODE ####
                # f = open("/home/swift/spindowndevices", "r")
                # sdlist = f.read().strip().split("\n")
                # logging.info("===Spun down devices===:%s", str(sdlist))
                # f.close()
                # sddict = dict()
                # for i in sdlist:
                #     logging.info("===sddict===%s", sddict)
                #     if i.split(":")[0] in sddict:
                #         sddict[i.split(":")[0]].append(i.split(":")[1])
                #     else:
                #         sddict[i.split(":")[0]] = []
                #         sddict[i.split(":")[0]].append(i.split(":")[1])
                # nodes = []
                # for node in part_nodes:
                #     if node['ip'] not in sddict and node['id'] != local_dev['id']:
                #         nodes.append(node)
                #     else:
                #         if node['device'] not in sddict[node['ip']] and node['id'] != local_dev['id']:
                #             nodes.append(node)
                nodes = [node for node in part_nodes
                         if node['id'] != local_dev['id']]
                logging.info("===Replication nodes===%s", str(nodes))
                # logging.info("===sddict===%s", str(sddict))
                #### END CHANGED CODE ####
                jobs.append(
                    dict(path=job_path,
                         device=local_dev['device'],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         partition=partition))
            except (ValueError, OSError):
                continue
    random.shuffle(jobs)
    if self.handoffs_first:
        # Move the handoff parts to the front of the list
        jobs.sort(key=lambda job: not job['delete'])
    self.job_count = len(jobs)
    return jobs
Example 14: build_replication_jobs
def build_replication_jobs(self, policy, ips, override_devices=None, override_partitions=None):
    """
    Helper function for collect_jobs to build jobs for replication
    using replication style storage policy
    """
    jobs = []
    self.all_devs_info.update([(dev["replication_ip"], dev["device"]) for dev in policy.object_ring.devs if dev])
    data_dir = get_data_dir(policy)
    found_local = False
    for local_dev in [
        dev
        for dev in policy.object_ring.devs
        if (
            dev
            and is_local_device(ips, self.port, dev["replication_ip"], dev["replication_port"])
            and (override_devices is None or dev["device"] in override_devices)
        )
    ]:
        found_local = True
        dev_path = join(self.devices_dir, local_dev["device"])
        obj_path = join(dev_path, data_dir)
        tmp_path = join(dev_path, get_tmp_dir(policy))
        if self.mount_check and not ismount(dev_path):
            self._add_failure_stats(
                [
                    (failure_dev["replication_ip"], failure_dev["device"])
                    for failure_dev in policy.object_ring.devs
                    if failure_dev
                ]
            )
            self.logger.warning(_("%s is not mounted"), local_dev["device"])
            continue
        unlink_older_than(tmp_path, time.time() - self.reclaim_age)
        if not os.path.exists(obj_path):
            try:
                mkdirs(obj_path)
            except Exception:
                self.logger.exception("ERROR creating %s" % obj_path)
            continue
        for partition in os.listdir(obj_path):
            if override_partitions is not None and partition not in override_partitions:
                continue
            part_nodes = None
            try:
                job_path = join(obj_path, partition)
                part_nodes = policy.object_ring.get_part_nodes(int(partition))
                nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                jobs.append(
                    dict(
                        path=job_path,
                        device=local_dev["device"],
                        obj_path=obj_path,
                        nodes=nodes,
                        delete=len(nodes) > len(part_nodes) - 1,
                        policy=policy,
                        partition=partition,
                        region=local_dev["region"],
                    )
                )
            except ValueError:
                if part_nodes:
                    self._add_failure_stats(
                        [(failure_dev["replication_ip"], failure_dev["device"]) for failure_dev in nodes]
                    )
                else:
                    self._add_failure_stats(
                        [
                            (failure_dev["replication_ip"], failure_dev["device"])
                            for failure_dev in policy.object_ring.devs
                            if failure_dev
                        ]
                    )
                continue
    if not found_local:
        self.logger.error(
            "Can't find itself %s with port %s in ring file, not replicating", ", ".join(ips), self.port
        )
    return jobs
Example 15: run_once
def run_once(self, *args, **kwargs):
    """Run a replication pass once."""
    override_options = parse_override_options(once=True, **kwargs)
    devices_to_replicate = override_options.devices or Everything()
    partitions_to_replicate = override_options.partitions or Everything()
    self._zero_stats()
    dirs = []
    ips = whataremyips(self.bind_ip)
    if not ips:
        self.logger.error(_('ERROR Failed to get my own IPs?'))
        return
    if self.handoffs_only:
        self.logger.warning(
            'Starting replication pass with handoffs_only enabled. '
            'This mode is not intended for normal '
            'operation; use handoffs_only with care.')
    self._local_device_ids = set()
    found_local = False
    for node in self.ring.devs:
        if node and is_local_device(ips, self.port,
                                    node['replication_ip'],
                                    node['replication_port']):
            found_local = True
            try:
                dev_path = check_drive(self.root, node['device'],
                                       self.mount_check)
            except ValueError as err:
                self._add_failure_stats(
                    [(failure_dev['replication_ip'],
                      failure_dev['device'])
                     for failure_dev in self.ring.devs if failure_dev])
                self.logger.warning('Skipping: %s', err)
                continue
            if node['device'] not in devices_to_replicate:
                self.logger.debug(
                    'Skipping device %s due to given arguments',
                    node['device'])
                continue
            unlink_older_than(
                os.path.join(dev_path, 'tmp'),
                time.time() - self.reclaim_age)
            datadir = os.path.join(self.root, node['device'], self.datadir)
            if os.path.isdir(datadir):
                self._local_device_ids.add(node['id'])
                part_filt = self._partition_dir_filter(
                    node['id'], partitions_to_replicate)
                dirs.append((datadir, node['id'], part_filt))
    if not found_local:
        self.logger.error("Can't find itself %s with port %s in ring "
                          "file, not replicating",
                          ", ".join(ips), self.port)
    self.logger.info(_('Beginning replication run'))
    for part, object_file, node_id in self.roundrobin_datadirs(dirs):
        self.cpool.spawn_n(
            self._replicate_object, part, object_file, node_id)
    self.cpool.waitall()
    self.logger.info(_('Replication run OVER'))
    if self.handoffs_only:
        self.logger.warning(
            'Finished replication pass with handoffs_only enabled. '
            'If handoffs_only is no longer required, disable it.')
    self._report_stats()