This article collects typical usage examples of the swift.common.utils.renamer function in Python. If you are wrestling with questions like "what exactly does renamer do?" and "how is it used in practice?", then congratulations: the curated code examples here may be just the help you need.
Below are 15 code examples of the renamer function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
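
For context before the examples: this page never shows renamer itself. In Swift it lives in swift/common/utils.py and amounts to "create the destination's parent directories, then os.rename(), retrying once to paper over races", with later releases adding an optional fsync of the parent directory (several examples below pass fsync=False). A minimal sketch of that behavior, as an approximation rather than Swift's exact code:

import os


def renamer(old, new, fsync=True):
    # Ensure the destination's parent directories exist, then rename.
    # os.rename() is atomic on POSIX when both paths are on the same
    # filesystem, which is what makes write-temp-then-rename safe.
    dirpath = os.path.dirname(new)
    try:
        if not os.path.isdir(dirpath):
            os.makedirs(dirpath)
        os.rename(old, new)
    except OSError:
        # Retry once: a background process may have pruned the parent
        # directory between makedirs() and rename().
        if not os.path.isdir(dirpath):
            os.makedirs(dirpath)
        os.rename(old, new)
    if fsync:
        # Persist the new directory entry so the rename survives a crash.
        fd = os.open(dirpath, os.O_RDONLY)
        try:
            os.fsync(fd)
        finally:
            os.close(fd)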

Example 1: copy_put
def copy_put(self, fd, tmppath):
    tpool.execute(os.fsync, fd)
    if self.obj_path:
        dir_objs = self.obj_path.split('/')
        tmp_path = ''
        if len(dir_objs):
            for dir_name in dir_objs:
                if tmp_path:
                    tmp_path = tmp_path + '/' + dir_name
                else:
                    tmp_path = dir_name
                if not self.create_dir_object(
                        os.path.join(self.container_path, tmp_path)):
                    self.logger.error("Failed in subdir %s",
                                      os.path.join(self.container_path,
                                                   tmp_path))
                    return False
    renamer(tmppath, os.path.join(self.datadir, self.obj))
    do_chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
    return True
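
The loop before the rename is worth noting, since it recurs in Examples 9 and 12: this backend maps objects onto a real filesystem hierarchy, so every ancestor component of self.obj_path must exist as a directory object before renamer() moves the temp file into its final location.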

Example 2: process_object_update
def process_object_update(self, update_path, device):
    """
    Process the object information to be updated and update.

    :param update_path: path to pickled object update file
    :param device: path to device
    """
    try:
        update = pickle.load(open(update_path, "rb"))
    except Exception:
        self.logger.exception(
            _("ERROR Pickle problem, quarantining %s"), update_path)
        renamer(update_path, os.path.join(device, "quarantined", "objects",
                                          os.path.basename(update_path)))
        return
    successes = update.get("successes", [])
    part, nodes = self.get_container_ring().get_nodes(
        update["account"], update["container"])
    obj = "/%s/%s/%s" % (update["account"], update["container"],
                         update["obj"])
    success = True
    for node in nodes:
        if node["id"] not in successes:
            status = self.object_update(node, part, update["op"], obj,
                                        update["headers"])
            if not (200 <= status < 300) and status != 404:
                success = False
            else:
                successes.append(node["id"])
    if success:
        self.successes += 1
        self.logger.debug(_("Update sent for %(obj)s %(path)s"),
                          {"obj": obj, "path": update_path})
        os.unlink(update_path)
    else:
        self.failures += 1
        self.logger.debug(_("Update failed for %(obj)s %(path)s"),
                          {"obj": obj, "path": update_path})
        update["successes"] = successes
        write_pickle(update, update_path, os.path.join(device, "tmp"))
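
Several examples re-queue failed updates with write_pickle, the other half of this temp-file-plus-renamer idiom. A sketch of it from memory of swift.common.utils, not guaranteed verbatim:

import os
import pickle
from tempfile import mkstemp

from swift.common.utils import renamer


def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
    # tmp must live on the same filesystem as dest, or the final
    # rename cannot be atomic.
    if tmp is None:
        tmp = os.path.dirname(dest)
    fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
    with os.fdopen(fd, 'wb') as fo:
        pickle.dump(obj, fo, pickle_protocol)
        fo.flush()
        os.fsync(fd)  # make the data durable before publishing it
    renamer(tmppath, dest)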

Example 3: put
def put(self, fd, metadata):
    """
    Finalize writing the file on disk, and renames it from the temp file to
    the real location. This should be called after the data has been
    written to the temp file.

    :param fd: file descriptor of the temp file
    :param metadata: dictionary of metadata to be written
    """
    assert self.tmppath is not None
    assert self._type == 0
    # wait, what?
    #metadata['name'] = self.name
    timestamp = normalize_timestamp(metadata['X-Timestamp'])
    base_path = os.path.join(self.datadir, timestamp)
    # P3
    fp = open("/tmp/dump", "a")
    print >>fp, "posix put old", self.tmppath, "new", base_path
    fp.close()
    write_meta_file(base_path + '.meta', metadata)
    #if 'Content-Length' in metadata:
    #    self.drop_cache(fd, 0, int(metadata['Content-Length']))
    # XXX os.fsync maybe?
    #tpool.execute(fsync, fd)
    renamer(self.tmppath, base_path + ".data")
    # but not setting self.data_file here, is this right?
    self.metadata = metadata

Example 4: recalculate_hashes
def recalculate_hashes(partition_dir, suffixes, reclaim_age=ONE_WEEK):
    """
    Recalculates hashes for the given suffixes in the partition and updates
    them in the partition's hashes file.

    :param partition_dir: directory of the partition in which to recalculate
    :param suffixes: list of suffixes to recalculate
    :param reclaim_age: age in seconds at which tombstones should be removed
    """
    def tpool_listdir(partition_dir):
        return dict(((suff, None) for suff in os.listdir(partition_dir)
                     if len(suff) == 3 and isdir(join(partition_dir, suff))))
    hashes_file = join(partition_dir, HASH_FILE)
    with lock_path(partition_dir):
        try:
            with open(hashes_file, 'rb') as fp:
                hashes = pickle.load(fp)
        except Exception:
            hashes = tpool.execute(tpool_listdir, partition_dir)
        for suffix in suffixes:
            suffix_dir = join(partition_dir, suffix)
            if os.path.exists(suffix_dir):
                hashes[suffix] = hash_suffix(suffix_dir, reclaim_age)
            elif suffix in hashes:
                del hashes[suffix]
        with open(hashes_file + '.tmp', 'wb') as fp:
            pickle.dump(hashes, fp, PICKLE_PROTOCOL)
        renamer(hashes_file + '.tmp', hashes_file)
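
The last three lines are the same atomic-replace idiom as elsewhere on this page: dump the new hashes to HASH_FILE + '.tmp' in the same directory, then renamer() it over the live file. Renaming over an existing regular file is atomic on POSIX (unlike the directory renames in Example 5), so readers see either the old hashes file or the new one, never a partial write.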

Example 5: quarantine
def quarantine(self, reason):
    """
    The database will be quarantined and a
    sqlite3.DatabaseError will be raised indicating the action taken.
    """
    prefix_path = os.path.dirname(self.db_dir)
    partition_path = os.path.dirname(prefix_path)
    dbs_path = os.path.dirname(partition_path)
    device_path = os.path.dirname(dbs_path)
    quar_path = os.path.join(device_path, 'quarantined',
                             self.db_type + 's',
                             os.path.basename(self.db_dir))
    try:
        renamer(self.db_dir, quar_path, fsync=False)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        quar_path = "%s-%s" % (quar_path, uuid4().hex)
        renamer(self.db_dir, quar_path, fsync=False)
    detail = _('Quarantined %(db_dir)s to %(quar_path)s due to '
               '%(reason)s') % {'db_dir': self.db_dir,
                                'quar_path': quar_path,
                                'reason': reason}
    self.logger.error(detail)
    raise sqlite3.DatabaseError(detail)
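
Why the except-and-retry: rename(2) refuses to move a directory onto an existing non-empty directory, failing with EEXIST or ENOTEMPTY, so a second quarantined copy of the same database gets a fresh uuid4 suffix instead. A tiny standalone demo of that failure mode (paths here are illustrative):

import errno
import os
import tempfile

base = tempfile.mkdtemp()
src = os.path.join(base, 'src')
dst = os.path.join(base, 'dst')
os.makedirs(src)
os.makedirs(os.path.join(dst, 'occupied'))  # dst exists and is non-empty
try:
    os.rename(src, dst)
except OSError as e:
    assert e.errno in (errno.EEXIST, errno.ENOTEMPTY)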

Example 6: find_and_process
def find_and_process(self):
    src_filename = time.strftime(self.filename_format)
    working_dir = os.path.join(self.target_dir,
                               '.%s-stats_tmp' % self.stats_type)
    shutil.rmtree(working_dir, ignore_errors=True)
    mkdirs(working_dir)
    tmp_filename = os.path.join(working_dir, src_filename)
    hasher = hashlib.md5()
    try:
        with open(tmp_filename, "wb") as statfile:
            statfile.write(self.get_header())
            for device in os.listdir(self.devices):
                if self.mount_check and not check_mount(self.devices,
                                                        device):
                    self.logger.error(
                        _("Device %s is not mounted, skipping.") % device)
                    continue
                db_dir = os.path.join(self.devices, device, self.data_dir)
                if not os.path.exists(db_dir):
                    self.logger.debug(
                        _("Path %s does not exist, skipping.") % db_dir)
                    continue
                for root, dirs, files in os.walk(db_dir, topdown=False):
                    for filename in files:
                        if filename.endswith(".db"):
                            db_path = os.path.join(root, filename)
                            try:
                                line_data = self.get_data(db_path)
                            except sqlite3.Error, err:
                                self.logger.info(
                                    _("Error accessing db %s: %s") %
                                    (db_path, err))
                                continue
                            if line_data:
                                statfile.write(line_data)
                                hasher.update(line_data)
        src_filename += hasher.hexdigest()
        renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
    finally:
        # remove the temp working dir whether or not the stats file was
        # produced (cf. the trailing rmtree in Example 14)
        shutil.rmtree(working_dir, ignore_errors=True)

Example 7: put
def put(self, metadata, extension='.data'):
    """
    Finalize writing the file on disk, and renames it from the temp file
    to the real location. This should be called after the data has been
    written to the temp file.

    :param metadata: dictionary of metadata to be written
    :param extension: extension to be used when making the file
    """
    assert self.tmppath is not None
    timestamp = normalize_timestamp(metadata['X-Timestamp'])
    metadata['name'] = self.disk_file.name
    # Write the metadata before calling fsync() so that both data and
    # metadata are flushed to disk.
    write_metadata(self.fd, metadata)
    # We call fsync() before calling drop_cache() to lower the amount of
    # redundant work the drop cache code will perform on the pages (now
    # that after fsync the pages will be all clean).
    tpool.execute(fsync, self.fd)
    # From the Department of the Redundancy Department, make sure we
    # call drop_cache() after fsync() to avoid redundant work (pages
    # all clean).
    drop_buffer_cache(self.fd, 0, self.upload_size)
    invalidate_hash(os.path.dirname(self.disk_file.datadir))
    # After the rename completes, this object will be available for other
    # requests to reference.
    renamer(self.tmppath,
            os.path.join(self.disk_file.datadir, timestamp + extension))
    self.disk_file.metadata = metadata
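
The ordering here is deliberate: metadata write, fsync(), drop_cache() and invalidate_hash() all happen while the object still lives at the temp path, and renamer() runs last, so a concurrent GET can never observe an object that is visible but not yet durable.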

Example 8: possibly_quarantine
def possibly_quarantine(self, exc_type, exc_value, exc_traceback):
    """
    Checks the exception info to see if it indicates a quarantine situation
    (malformed or corrupted database). If not, the original exception will
    be reraised. If so, the database will be quarantined and a new
    sqlite3.DatabaseError will be raised indicating the action taken.
    """
    if 'database disk image is malformed' in str(exc_value):
        exc_hint = 'malformed'
    elif 'file is encrypted or is not a database' in str(exc_value):
        exc_hint = 'corrupted'
    else:
        raise exc_type, exc_value, exc_traceback
    prefix_path = os.path.dirname(self.db_dir)
    partition_path = os.path.dirname(prefix_path)
    dbs_path = os.path.dirname(partition_path)
    device_path = os.path.dirname(dbs_path)
    quar_path = os.path.join(device_path, 'quarantined',
                             self.db_type + 's',
                             os.path.basename(self.db_dir))
    try:
        renamer(self.db_dir, quar_path)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        quar_path = "%s-%s" % (quar_path, uuid4().hex)
        renamer(self.db_dir, quar_path)
    detail = _('Quarantined %s to %s due to %s database') % \
        (self.db_dir, quar_path, exc_hint)
    self.logger.error(detail)
    raise sqlite3.DatabaseError(detail)

Example 9: put
def put(self, fd, tmppath, metadata, extension=''):
    """
    Finalize writing the file on disk, and renames it from the temp file to
    the real location. This should be called after the data has been
    written to the temp file.

    :param fd: file descriptor of the temp file
    :param tmppath: path to the temporary file being used
    :param metadata: dictionary of metadata to be written
    :param extension: extension to be used when making the file
    """
    # Marker dir.
    if extension == '.ts':
        return True
    if extension == '.meta':
        self.put_metadata(metadata)
        return True
    else:
        extension = ''
    if metadata[X_OBJECT_TYPE] == MARKER_DIR:
        self.create_dir_object(os.path.join(self.datadir, self.obj))
        self.put_metadata(metadata)
        self.data_file = self.datadir + '/' + self.obj
        return True
    # Check if directory already exists.
    if self.is_dir:
        self.logger.error('Directory already exists %s/%s' %
                          (self.datadir, self.obj))
        return False
    #metadata['name'] = self.name
    timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
    write_metadata(tmppath, metadata)
    if X_CONTENT_LENGTH in metadata:
        self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
    tpool.execute(os.fsync, fd)
    if self.obj_path:
        dir_objs = self.obj_path.split('/')
        tmp_path = ''
        if len(dir_objs):
            for dir_name in dir_objs:
                if tmp_path:
                    tmp_path = tmp_path + '/' + dir_name
                else:
                    tmp_path = dir_name
                if not self.create_dir_object(
                        os.path.join(self.container_path, tmp_path)):
                    self.logger.error("Failed in subdir %s",
                                      os.path.join(self.container_path,
                                                   tmp_path))
                    return False
    renamer(tmppath, os.path.join(self.datadir, self.obj + extension))
    do_chown(os.path.join(self.datadir, self.obj + extension),
             self.uid, self.gid)
    self.metadata = metadata
    #self.logger.error("Meta %s", self.metadata)
    self.data_file = self.datadir + '/' + self.obj + extension
    return True

Example 10: process_object_update
def process_object_update(self, update_path, device, policy):
    """
    Process the object information to be updated and update.

    :param update_path: path to pickled object update file
    :param device: path to device
    :param policy: storage policy of object update
    """
    try:
        update = pickle.load(open(update_path, 'rb'))
    except Exception:
        self.logger.exception(
            _('ERROR Pickle problem, quarantining %s'), update_path)
        self.stats.quarantines += 1
        self.logger.increment('quarantines')
        target_path = os.path.join(device, 'quarantined', 'objects',
                                   os.path.basename(update_path))
        renamer(update_path, target_path, fsync=False)
        return
    successes = update.get('successes', [])
    part, nodes = self.get_container_ring().get_nodes(
        update['account'], update['container'])
    obj = '/%s/%s/%s' % \
        (update['account'], update['container'], update['obj'])
    headers_out = HeaderKeyDict(update['headers'])
    headers_out['user-agent'] = 'object-updater %s' % os.getpid()
    headers_out.setdefault('X-Backend-Storage-Policy-Index',
                           str(int(policy)))
    events = [spawn(self.object_update,
                    node, part, update['op'], obj, headers_out)
              for node in nodes if node['id'] not in successes]
    success = True
    new_successes = False
    for event in events:
        event_success, node_id = event.wait()
        if event_success is True:
            successes.append(node_id)
            new_successes = True
        else:
            success = False
    if success:
        self.stats.successes += 1
        self.logger.increment('successes')
        self.logger.debug('Update sent for %(obj)s %(path)s',
                          {'obj': obj, 'path': update_path})
        self.stats.unlinks += 1
        self.logger.increment('unlinks')
        os.unlink(update_path)
    else:
        self.stats.failures += 1
        self.logger.increment('failures')
        self.logger.debug('Update failed for %(obj)s %(path)s',
                          {'obj': obj, 'path': update_path})
        if new_successes:
            update['successes'] = successes
        write_pickle(update, update_path, os.path.join(
            device, get_tmp_dir(policy)))
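
Compared with Example 2, this later revision fans the container requests out concurrently with spawn()/wait(), tracks counters via self.stats and self.logger.increment(), passes fsync=False when quarantining (durability matters less for a file being set aside), and re-pickles failed updates into the policy-specific tmp dir returned by get_tmp_dir(policy).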

Example 11: complete_rsync
def complete_rsync(self, drive, db_file, args):
    old_filename = os.path.join(self.root, drive, 'tmp', args[0])
    if os.path.exists(db_file):
        return HTTPNotFound()
    if not os.path.exists(old_filename):
        return HTTPNotFound()
    broker = self.broker_class(old_filename)
    broker.newid(args[0])
    renamer(old_filename, db_file)
    return HTTPNoContent()

Example 12: put
def put(self, fd, tmppath, metadata, extension=''):
    """
    Finalize writing the file on disk, and renames it from the temp file to
    the real location. This should be called after the data has been
    written to the temp file.

    :param fd: file descriptor of the temp file
    :param tmppath: path to the temporary file being used
    :param metadata: dictionary of metadata to be written
    :param extension: extension to be used when making the file
    """
    # Marker dir.
    if metadata[X_OBJECT_TYPE] == MARKER_DIR:
        if os.path.exists(os.path.join(self.datadir, self.obj)) and \
                not os.path.isdir(os.path.join(self.datadir, self.obj)):
            os.unlink(os.path.join(self.datadir, self.obj))
        mkdirs(os.path.join(self.datadir, self.obj))
        os.chown(os.path.join(self.datadir, self.obj), self.uid, self.gid)
        self.put_metadata(metadata)
        self.data_file = self.datadir + '/' + self.obj
        return True
    # Check if directory already exists.
    if self.is_dir:
        logging.error('Directory already exists %s/%s' %
                      (self.datadir, self.obj))
        return False
    #metadata['name'] = self.name
    timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
    write_metadata(fd, metadata)
    if X_CONTENT_LENGTH in metadata:
        self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
    tpool.execute(os.fsync, fd)
    if self.obj_path:
        dir_objs = self.obj_path.split('/')
        tmp_path = ''
        if len(dir_objs):
            for dir_name in dir_objs:
                if tmp_path:
                    tmp_path = tmp_path + '/' + dir_name
                else:
                    tmp_path = dir_name
                if not self.create_dir_object(tmp_path,
                                              metadata[X_TIMESTAMP]):
                    return False
    #print 'Gaurav put tmppath', tmppath, os.path.join(self.datadir,
    #                                                  self.obj + extension)
    #invalidate_hash(os.path.dirname(self.datadir))
    renamer(tmppath, os.path.join(self.datadir, self.obj + extension))
    os.chown(os.path.join(self.datadir, self.obj + extension),
             self.uid, self.gid)
    self.metadata = metadata
    self.data_file = self.datadir + '/' + self.obj + extension
    return True

Example 13: process_object_update
def process_object_update(self, update_path, device, policy_idx):
    """
    Process the object information to be updated and update.

    :param update_path: path to pickled object update file
    :param device: path to device
    :param policy_idx: storage policy index of object update
    """
    try:
        update = pickle.load(open(update_path, 'rb'))
    except Exception:
        self.logger.exception(
            _('ERROR Pickle problem, quarantining %s'), update_path)
        self.logger.increment('quarantines')
        renamer(update_path, os.path.join(
            device, 'quarantined', 'objects',
            os.path.basename(update_path)))
        return
    successes = update.get('successes', [])
    part, nodes = self.get_container_ring().get_nodes(
        update['account'], update['container'])
    obj = '/%s/%s/%s' % \
        (update['account'], update['container'], update['obj'])
    success = True
    new_successes = False
    for node in nodes:
        if node['id'] not in successes:
            headers = update['headers'].copy()
            headers.setdefault('X-Backend-Storage-Policy-Index',
                               str(policy_idx))
            status = self.object_update(node, part, update['op'], obj,
                                        headers)
            if not is_success(status) and status != HTTP_NOT_FOUND:
                success = False
            else:
                successes.append(node['id'])
                new_successes = True
    if success:
        self.successes += 1
        self.logger.increment('successes')
        self.logger.debug('Update sent for %(obj)s %(path)s',
                          {'obj': obj, 'path': update_path})
        self.logger.increment("unlinks")
        os.unlink(update_path)
    else:
        self.failures += 1
        self.logger.increment('failures')
        self.logger.debug('Update failed for %(obj)s %(path)s',
                          {'obj': obj, 'path': update_path})
        if new_successes:
            update['successes'] = successes
        write_pickle(update, update_path, os.path.join(
            device, get_tmp_dir(policy_idx)))

Example 14: find_and_process
def find_and_process(self):
    src_filename = time.strftime(self.filename_format)
    working_dir = os.path.join(self.target_dir, '.stats_tmp')
    shutil.rmtree(working_dir, ignore_errors=True)
    mkdirs(working_dir)
    tmp_filename = os.path.join(working_dir, src_filename)
    hasher = hashlib.md5()
    with open(tmp_filename, 'wb') as statfile:
        # csv has the following columns:
        # Account Name, Container Count, Object Count, Bytes Used
        for device in os.listdir(self.devices):
            if self.mount_check and not check_mount(self.devices, device):
                self.logger.error(
                    _("Device %s is not mounted, skipping.") % device)
                continue
            accounts = os.path.join(self.devices,
                                    device,
                                    account_server_data_dir)
            if not os.path.exists(accounts):
                self.logger.debug(_("Path %s does not exist, skipping.") %
                                  accounts)
                continue
            for root, dirs, files in os.walk(accounts, topdown=False):
                for filename in files:
                    if filename.endswith('.db'):
                        db_path = os.path.join(root, filename)
                        broker = AccountBroker(db_path)
                        if not broker.is_deleted():
                            (account_name,
                             _junk, _junk, _junk,
                             container_count,
                             object_count,
                             bytes_used,
                             _junk, _junk) = broker.get_info()
                            line_data = '"%s",%d,%d,%d\n' % (
                                account_name, container_count,
                                object_count, bytes_used)
                            statfile.write(line_data)
                            hasher.update(line_data)
    file_hash = hasher.hexdigest()
    hash_index = src_filename.find('*')
    if hash_index < 0:
        # if there is no * in the target filename, the uploader probably
        # won't work because we are crafting a filename that doesn't
        # fit the pattern
        src_filename = '_'.join([src_filename, file_hash])
    else:
        parts = src_filename[:hash_index], src_filename[hash_index + 1:]
        src_filename = ''.join([parts[0], file_hash, parts[1]])
    renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
    shutil.rmtree(working_dir, ignore_errors=True)

Example 15: quarantine_db
def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area
    to allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    object_dir = os.path.dirname(object_file)
    quarantine_dir = os.path.abspath(
        os.path.join(object_dir, '..', '..', '..', '..',
                     'quarantined', server_type + 's',
                     os.path.basename(object_dir)))
    renamer(object_dir, quarantine_dir)
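
To make the path arithmetic concrete: with the usual on-disk layout, object_file might be /srv/node/sdb1/containers/1234/abc/<hash>/<hash>.db, so the four '..' components climb from the hash directory back to the device root, and quarantine_dir becomes /srv/node/sdb1/quarantined/containers/<hash>. renamer() then moves the whole database directory aside in one step, leaving replication to restore a clean copy.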