This page collects typical usage examples of the Python function syncdutils.errno_wrap. If you are unsure how errno_wrap is called or what it is used for, the selected code examples below may help.
A total of 14 code examples of the errno_wrap function are shown below, sorted by popularity by default.
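Judging from the call sites in the examples, errno_wrap is invoked as errno_wrap(callable, argument_list, tolerated_errnos[, retried_errnos]): the wrapped call runs with the given arguments, an OSError whose errno is in the first list appears to be swallowed with the errno handed back as an int (which is why the callers check isinstance(result, int)), and an errno in the optional second list triggers a retry. The following is only a minimal sketch of that behavior, reconstructed from those call sites; it is not the actual GlusterFS syncdutils implementation, and the retry cap and sleep interval are invented for illustration.

import logging
import time


def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]):
    """Illustrative sketch: run call(*arg), ignoring or retrying selected errnos."""
    tries = 0
    while True:
        try:
            return call(*arg)
        except OSError as ex:
            if ex.errno in errnos:
                # tolerated errno is handed back as an int for the caller to inspect
                return ex.errno
            if ex.errno not in retry_errnos:
                raise
            tries += 1
            if tries == 10:  # arbitrary retry cap for this sketch
                raise
            logging.warn('retrying %s after errno %d' % (call.__name__, ex.errno))
            time.sleep(0.25)


# Usage in the same style as the examples below: ENOENT is tolerated,
# while ESTALE/EINVAL trigger a retry.
# ret = errno_wrap(os.chmod, [path, mode], [ENOENT], [ESTALE, EINVAL])
# if isinstance(ret, int):
#     pass  # the call failed with a tolerated errno (here ENOENT)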
Example 1: meta_ops
def meta_ops(cls, meta_entries):
    logging.debug('Meta-entries: %s' % repr(meta_entries))
    for e in meta_entries:
        mode = e['stat']['mode']
        uid = e['stat']['uid']
        gid = e['stat']['gid']
        go = e['go']
        errno_wrap(os.chmod, [go, mode], [ENOENT], [ESTALE, EINVAL])
        errno_wrap(os.chown, [go, uid, gid], [ENOENT], [ESTALE, EINVAL])
Example 2: monitor
def monitor(*resources):
    # Check if gsyncd restarted in pause state. If
    # yes, send SIGSTOP to negative of monitor pid
    # to go back to pause state.
    if gconf.pause_on_start:
        errno_wrap(os.kill, [-os.getpid(), signal.SIGSTOP], [ESRCH])

    """oh yeah, actually Monitor is used as singleton, too"""
    return Monitor().multiplex(*distribute(*resources))
Example 3: wmon
def wmon(w):
    cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
                           slave_host, master)
    time.sleep(1)
    self.lock.acquire()
    for cpid in cpids:
        errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
    for apid in agents:
        errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
    self.lock.release()
    finalize(exval=1)
Example 4: entry_purge
def entry_purge(entry, gfid):
    # This is an extremely racy code and needs to be fixed ASAP.
    # The GFID check here is to be sure that the pargfid/bname
    # to be purged is the GFID gotten from the changelog.
    # (a stat(changelog_gfid) would also be valid here)
    # The race here is between the GFID check and the purge.
    disk_gfid = cls.gfid_mnt(entry)
    if isinstance(disk_gfid, int):
        return
    if not gfid == disk_gfid:
        return

    er = errno_wrap(os.unlink, [entry], [ENOENT, EISDIR])
    if isinstance(er, int):
        if er == EISDIR:
            er = errno_wrap(os.rmdir, [entry], [ENOENT, ENOTEMPTY])
            if er == ENOTEMPTY:
                return er
Example 5: entry_ops
def entry_ops(cls, entries):
    pfx = gauxpfx()
    logging.debug('entries: %s' % repr(entries))

    # regular file
    def entry_pack_reg(gf, bn, mo, uid, gid):
        blen = len(bn)
        return struct.pack(cls._fmt_mknod(blen),
                           uid, gid, gf, mo, bn,
                           stat.S_IMODE(mo), 0, umask())

    def entry_pack_reg_stat(gf, bn, st):
        blen = len(bn)
        mo = st['mode']
        return struct.pack(cls._fmt_mknod(blen),
                           st['uid'], st['gid'],
                           gf, mo, bn,
                           stat.S_IMODE(mo), 0, umask())

    # mkdir
    def entry_pack_mkdir(gf, bn, mo, uid, gid):
        blen = len(bn)
        return struct.pack(cls._fmt_mkdir(blen),
                           uid, gid, gf, mo, bn,
                           stat.S_IMODE(mo), umask())

    # symlink
    def entry_pack_symlink(gf, bn, lnk, st):
        blen = len(bn)
        llen = len(lnk)
        return struct.pack(cls._fmt_symlink(blen, llen),
                           st['uid'], st['gid'],
                           gf, st['mode'], bn, lnk)

    def entry_purge(entry, gfid):
        # This is an extremely racy code and needs to be fixed ASAP.
        # The GFID check here is to be sure that the pargfid/bname
        # to be purged is the GFID gotten from the changelog.
        # (a stat(changelog_gfid) would also be valid here)
        # The race here is between the GFID check and the purge.
        disk_gfid = cls.gfid_mnt(entry)
        if isinstance(disk_gfid, int):
            return
        if not gfid == disk_gfid:
            return

        er = errno_wrap(os.unlink, [entry], [ENOENT, EISDIR])
        if isinstance(er, int):
            if er == EISDIR:
                er = errno_wrap(os.rmdir, [entry], [ENOENT, ENOTEMPTY])
                if er == ENOTEMPTY:
                    return er

    for e in entries:
        blob = None
        op = e['op']
        gfid = e['gfid']
        entry = e['entry']
        (pg, bname) = entry2pb(entry)
        if op in ['RMDIR', 'UNLINK']:
            while True:
                er = entry_purge(entry, gfid)
                if isinstance(er, int):
                    time.sleep(1)
                else:
                    break
        elif op in ['CREATE', 'MKNOD']:
            blob = entry_pack_reg(
                gfid, bname, e['mode'], e['uid'], e['gid'])
        elif op == 'MKDIR':
            blob = entry_pack_mkdir(
                gfid, bname, e['mode'], e['uid'], e['gid'])
        elif op == 'LINK':
            slink = os.path.join(pfx, gfid)
            st = lstat(slink)
            if isinstance(st, int):
                (pg, bname) = entry2pb(entry)
                blob = entry_pack_reg_stat(gfid, bname, e['stat'])
            else:
                errno_wrap(os.link, [slink, entry], [ENOENT, EEXIST])
        elif op == 'SYMLINK':
            blob = entry_pack_symlink(gfid, bname, e['link'], e['stat'])
        elif op == 'RENAME':
            en = e['entry1']
            st = lstat(entry)
            if isinstance(st, int):
                (pg, bname) = entry2pb(en)
                blob = entry_pack_reg_stat(gfid, bname, e['stat'])
            else:
                errno_wrap(os.rename, [entry, en], [ENOENT, EEXIST])
        if blob:
            errno_wrap(Xattr.lsetxattr_l, [pg, 'glusterfs.gfid.newfile',
                                           blob],
                       [EEXIST], [ENOENT, ESTALE, EINVAL])
Example 6: gfid_mnt
def gfid_mnt(cls, gfidpath):
    return errno_wrap(Xattr.lgetxattr,
                      [gfidpath, 'glusterfs.gfid.string',
                       cls.GX_GFID_CANONICAL_LEN], [ENOENT])
Example 7: terminate
def terminate():
    # relax one SIGTERM by setting a handler that sets back
    # standard handler
    set_term_handler(lambda *a: set_term_handler())
    # give a chance to graceful exit
    errno_wrap(os.kill, [-os.getpid(), signal.SIGTERM], [ESRCH])
Example 8: crawl
def crawl(self, path='.', xtr=None, done=0):
    """ generate a CHANGELOG file consumable by process_change """
    if path == '.':
        self.open()
        self.crawls += 1
    if not xtr:
        # get the root stime and use it for all comparisons
        xtr = self.xtime('.', self.slave)
        if isinstance(xtr, int):
            if xtr != ENOENT:
                raise GsyncdError('slave is corrupt')
            xtr = self.minus_infinity
    xtl = self.xtime(path)
    if isinstance(xtl, int):
        raise GsyncdError('master is corrupt')
    if xtr == xtl:
        if path == '.':
            self.close()
        return
    self.xtime_reversion_hook(path, xtl, xtr)
    logging.debug("entering " + path)
    dem = self.master.server.entries(path)
    pargfid = self.master.server.gfid(path)
    if isinstance(pargfid, int):
        logging.warn('skipping directory %s' % (path))
    for e in dem:
        bname = e
        e = os.path.join(path, e)
        st = lstat(e)
        if isinstance(st, int):
            logging.warn('%s got purged in the interim..' % e)
            continue
        gfid = self.master.server.gfid(e)
        if isinstance(gfid, int):
            logging.warn('skipping entry %s..' % (e))
            continue
        xte = self.xtime(e)
        if isinstance(xte, int):
            raise GsyncdError('master is corrupt')
        if not self.need_sync(e, xte, xtr):
            continue
        mo = st.st_mode
        if stat.S_ISDIR(mo):
            self.write_entry_change("E", [gfid, 'MKDIR', escape(os.path.join(pargfid, bname))])
            self.crawl(e, xtr)
        elif stat.S_ISLNK(mo):
            rl = errno_wrap(os.readlink, [e], [ENOENT])
            if isinstance(rl, int):
                continue
            self.write_entry_change("E", [gfid, 'SYMLINK', escape(os.path.join(pargfid, bname)), rl])
        else:
            # if a file has a hardlink, create a Changelog entry as 'LINK' so the
            # slave side will decide whether to create the new entry or a link.
            if st.st_nlink == 1:
                self.write_entry_change("E", [gfid, 'MKNOD', escape(os.path.join(pargfid, bname))])
            else:
                self.write_entry_change("E", [gfid, 'LINK', escape(os.path.join(pargfid, bname))])
            if stat.S_ISREG(mo):
                self.write_entry_change("D", [gfid])
    if path == '.':
        logging.info('processing xsync changelog %s' % self.fname())
        self.close()
        self.process([self.fname()], done)
        self.upd_stime(xtl)
Example 9: entry_ops
def entry_ops(cls, entries):
    pfx = gauxpfx()
    logging.debug("entries: %s" % repr(entries))

    # regular file
    def entry_pack_reg(gf, bn, mo, uid, gid):
        blen = len(bn)
        return struct.pack(cls._fmt_mknod(blen), uid, gid, gf, mo, bn, stat.S_IMODE(mo), 0, umask())

    def entry_pack_reg_stat(gf, bn, st):
        blen = len(bn)
        mo = st["mode"]
        return struct.pack(cls._fmt_mknod(blen), st["uid"], st["gid"], gf, mo, bn, stat.S_IMODE(mo), 0, umask())

    # mkdir
    def entry_pack_mkdir(gf, bn, mo, uid, gid):
        blen = len(bn)
        return struct.pack(cls._fmt_mkdir(blen), uid, gid, gf, mo, bn, stat.S_IMODE(mo), umask())

    # symlink
    def entry_pack_symlink(gf, bn, lnk, st):
        blen = len(bn)
        llen = len(lnk)
        return struct.pack(cls._fmt_symlink(blen, llen), st["uid"], st["gid"], gf, st["mode"], bn, lnk)

    def entry_purge(entry, gfid):
        # This is an extremely racy code and needs to be fixed ASAP.
        # The GFID check here is to be sure that the pargfid/bname
        # to be purged is the GFID gotten from the changelog.
        # (a stat(changelog_gfid) would also be valid here)
        # The race here is between the GFID check and the purge.
        disk_gfid = cls.gfid_mnt(entry)
        if isinstance(disk_gfid, int):
            return
        if not gfid == disk_gfid:
            return

        er = errno_wrap(os.unlink, [entry], [ENOENT, EISDIR])
        if isinstance(er, int):
            if er == EISDIR:
                er = errno_wrap(os.rmdir, [entry], [ENOENT, ENOTEMPTY])
                if er == ENOTEMPTY:
                    return er

    for e in entries:
        blob = None
        op = e["op"]
        gfid = e["gfid"]
        entry = e["entry"]
        (pg, bname) = entry2pb(entry)
        if op in ["RMDIR", "UNLINK"]:
            while True:
                er = entry_purge(entry, gfid)
                if isinstance(er, int):
                    if er == ENOTEMPTY and op == "RMDIR":
                        er1 = errno_wrap(shutil.rmtree, [os.path.join(pg, bname)], [ENOENT])
                        if not isinstance(er1, int):
                            logging.info("Removed %s/%s recursively" % (pg, bname))
                            break
                    logging.warn("Failed to remove %s => %s/%s. %s" % (gfid, pg, bname, os.strerror(er)))
                    time.sleep(1)
                else:
                    break
        elif op in ["CREATE", "MKNOD"]:
            blob = entry_pack_reg(gfid, bname, e["mode"], e["uid"], e["gid"])
        elif op == "MKDIR":
            blob = entry_pack_mkdir(gfid, bname, e["mode"], e["uid"], e["gid"])
        elif op == "LINK":
            slink = os.path.join(pfx, gfid)
            st = lstat(slink)
            if isinstance(st, int):
                (pg, bname) = entry2pb(entry)
                blob = entry_pack_reg_stat(gfid, bname, e["stat"])
            else:
                errno_wrap(os.link, [slink, entry], [ENOENT, EEXIST])
        elif op == "SYMLINK":
            blob = entry_pack_symlink(gfid, bname, e["link"], e["stat"])
        elif op == "RENAME":
            en = e["entry1"]
            st = lstat(entry)
            if isinstance(st, int):
                if e["stat"] and not stat.S_ISDIR(e["stat"]["mode"]):
                    (pg, bname) = entry2pb(en)
                    blob = entry_pack_reg_stat(gfid, bname, e["stat"])
            else:
                errno_wrap(os.rename, [entry, en], [ENOENT, EEXIST])
        if blob:
            errno_wrap(Xattr.lsetxattr, [pg, "glusterfs.gfid.newfile", blob], [EEXIST], [ENOENT, ESTALE, EINVAL])
Example 10: process_change
def process_change(self, change, done, retry):
    pfx = gauxpfx()
    clist = []
    entries = []
    datas = set()

    # basic crawl stats: files and bytes
    files_pending = {'count': 0, 'purge': 0, 'bytes': 0, 'files': []}
    try:
        f = open(change, "r")
        clist = f.readlines()
        f.close()
    except IOError:
        raise

    def edct(op, **ed):
        dct = {}
        dct['op'] = op
        for k in ed:
            if k == 'stat':
                st = ed[k]
                dst = dct['stat'] = {}
                dst['uid'] = st.st_uid
                dst['gid'] = st.st_gid
                dst['mode'] = st.st_mode
            else:
                dct[k] = ed[k]
        return dct

    # regular file update: bytes & count
    def _update_reg(entry, size):
        if not entry in files_pending['files']:
            files_pending['count'] += 1
            files_pending['bytes'] += size
            files_pending['files'].append(entry)

    # updates for directories, symlinks etc..
    def _update_rest():
        files_pending['count'] += 1

    # entry count
    def entry_update(entry, size, mode):
        if stat.S_ISREG(mode):
            _update_reg(entry, size)
        else:
            _update_rest()

    # purge count
    def purge_update():
        files_pending['purge'] += 1

    for e in clist:
        e = e.strip()
        et = e[self.IDX_START:self.IDX_END]
        ec = e[self.IDX_END:].split(' ')
        if et in self.TYPE_ENTRY:
            ty = ec[self.POS_TYPE]
            en = unescape(os.path.join(pfx, ec[self.POS_ENTRY1]))
            gfid = ec[self.POS_GFID]
            # definitely need a better way bucketize entry ops
            if ty in ['UNLINK', 'RMDIR']:
                purge_update()
                entries.append(edct(ty, gfid=gfid, entry=en))
                continue
            go = os.path.join(pfx, gfid)
            st = lstat(go)
            if isinstance(st, int):
                if ty == 'RENAME':
                    entries.append(edct('UNLINK', gfid=gfid, entry=en))
                else:
                    logging.debug('file %s got purged in the interim' % go)
                continue
            entry_update(go, st.st_size, st.st_mode)
            if ty in ['CREATE', 'MKDIR', 'MKNOD']:
                entries.append(edct(ty, stat=st, entry=en, gfid=gfid))
            elif ty == 'LINK':
                entries.append(edct(ty, stat=st, entry=en, gfid=gfid))
            elif ty == 'SYMLINK':
                rl = errno_wrap(os.readlink, [en], [ENOENT])
                if isinstance(rl, int):
                    continue
                entries.append(edct(ty, stat=st, entry=en, gfid=gfid, link=rl))
            elif ty == 'RENAME':
                e2 = unescape(os.path.join(pfx, ec[self.POS_ENTRY2]))
                entries.append(edct(ty, gfid=gfid, entry=en, entry1=e2, stat=st))
            else:
                logging.warn('ignoring %s [op %s]' % (gfid, ty))
        elif et in self.TYPE_GFID:
            go = os.path.join(pfx, ec[0])
            st = lstat(go)
            if isinstance(st, int):
                logging.debug('file %s got purged in the interim' % go)
                continue
            entry_update(go, st.st_size, st.st_mode)
            datas.update([go])
    logging.debug('entries: %s' % repr(entries))
    if not retry:
        self.update_cumulative_stats(files_pending)
    # sync namespace
    if (entries):
        self.slave.server.entry_ops(entries)
    # sync data
# ......... (rest of this method omitted in the source listing) .........
Example 11: subcmd_delete
def subcmd_delete(args):
    import logging
    import shutil
    import glob
    import sys
    from errno import ENOENT, ENODATA
    import struct

    from syncdutils import GsyncdError, Xattr, errno_wrap
    import gsyncdconfig as gconf

    logging.info('geo-replication delete')
    # remove the stime xattr from all the brick paths so that
    # a re-create of a session will start sync all over again
    stime_xattr_prefix = gconf.get('stime-xattr-prefix', None)

    # Delete pid file, status file, socket file
    cleanup_paths = []
    cleanup_paths.append(gconf.get("pid-file"))

    # Cleanup Session dir
    try:
        shutil.rmtree(gconf.get("georep-session-working-dir"))
    except (IOError, OSError):
        if sys.exc_info()[1].errno == ENOENT:
            pass
        else:
            raise GsyncdError(
                'Error while removing working dir: %s' %
                gconf.get("georep-session-working-dir"))

    # Cleanup changelog working dirs
    try:
        shutil.rmtree(gconf.get("working-dir"))
    except (IOError, OSError):
        if sys.exc_info()[1].errno == ENOENT:
            pass
        else:
            raise GsyncdError(
                'Error while removing working dir: %s' %
                gconf.get("working-dir"))

    for path in cleanup_paths:
        # To delete temp files
        for f in glob.glob(path + "*"):
            _unlink(f)

    if args.reset_sync_time and stime_xattr_prefix:
        for p in args.paths:
            if p != "":
                # set stime to (0,0) to trigger full volume content resync
                # to slave on session recreation
                # look at master.py::Xcrawl hint: zero_zero
                errno_wrap(Xattr.lsetxattr,
                           (p, stime_xattr_prefix + ".stime",
                            struct.pack("!II", 0, 0)),
                           [ENOENT, ENODATA])
                errno_wrap(Xattr.lremovexattr,
                           (p, stime_xattr_prefix + ".entry_stime"),
                           [ENOENT, ENODATA])

    return
Example 12: monitor
# ......... (beginning of this method omitted in the source listing) .........
logging.debug("Worker would mount volume privately")
unshare_cmd = ['unshare', '-m', '--propagation',
'private']
cmd = unshare_cmd + args_to_worker
os.execvp("unshare", cmd)
else:
logging.debug("Mount is not private. It would be lazy"
" umounted")
os.execv(sys.executable, args_to_worker)
cpids.add(cpid)
agents.add(apid)
os.close(pw)
# close all RPC pipes in monitor
os.close(ra)
os.close(wa)
os.close(rw)
os.close(ww)
self.lock.release()
t0 = time.time()
so = select((pr,), (), (), conn_timeout)[0]
os.close(pr)
if so:
ret = nwait(cpid, os.WNOHANG)
ret_agent = nwait(apid, os.WNOHANG)
if ret_agent is not None:
# Agent is died Kill Worker
logging.info(lf("Changelog Agent died, Aborting Worker",
brick=w[0]['dir']))
errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
nwait(cpid)
nwait(apid)
if ret is not None:
logging.info(lf("worker died before establishing "
"connection",
brick=w[0]['dir']))
nwait(apid) # wait for agent
else:
logging.debug("worker(%s) connected" % w[0]['dir'])
while time.time() < t0 + conn_timeout:
ret = nwait(cpid, os.WNOHANG)
ret_agent = nwait(apid, os.WNOHANG)
if ret is not None:
logging.info(lf("worker died in startup phase",
brick=w[0]['dir']))
nwait(apid) # wait for agent
break
if ret_agent is not None:
# Agent is died Kill Worker
logging.info(lf("Changelog Agent died, Aborting "
"Worker",
brick=w[0]['dir']))
errno_wrap(os.kill, [cpid, signal.SIGKILL],
[ESRCH])
nwait(cpid)
nwait(apid)
break
time.sleep(1)
Example 13: entry_ops
def entry_ops(cls, entries):
    pfx = gauxpfx()
    logging.debug("entries: %s" % repr(entries))

    # regular file
    def entry_pack_reg(gf, bn, st):
        blen = len(bn)
        mo = st["mode"]
        return struct.pack(cls._fmt_mknod(blen), st["uid"], st["gid"], gf, mo, bn, stat.S_IMODE(mo), 0, umask())

    # mkdir
    def entry_pack_mkdir(gf, bn, st):
        blen = len(bn)
        mo = st["mode"]
        return struct.pack(cls._fmt_mkdir(blen), st["uid"], st["gid"], gf, mo, bn, stat.S_IMODE(mo), umask())

    # symlink
    def entry_pack_symlink(gf, bn, lnk, st):
        blen = len(bn)
        llen = len(lnk)
        return struct.pack(cls._fmt_symlink(blen, llen), st["uid"], st["gid"], gf, st["mode"], bn, lnk)

    def entry_purge(entry, gfid):
        # This is an extremely racy code and needs to be fixed ASAP.
        # The GFID check here is to be sure that the pargfid/bname
        # to be purged is the GFID gotten from the changelog.
        # (a stat(changelog_gfid) would also be valid here)
        # The race here is between the GFID check and the purge.
        disk_gfid = cls.gfid(entry)
        if isinstance(disk_gfid, int):
            return
        if not gfid == disk_gfid:
            return

        er = errno_wrap(os.unlink, [entry], [ENOENT, EISDIR])
        if isinstance(er, int):
            if er == EISDIR:
                er = errno_wrap(os.rmdir, [entry], [ENOENT, ENOTEMPTY])
                if er == ENOTEMPTY:
                    return er

    for e in entries:
        blob = None
        op = e["op"]
        gfid = e["gfid"]
        entry = e["entry"]
        (pg, bname) = entry2pb(entry)
        if op in ["RMDIR", "UNLINK"]:
            while True:
                er = entry_purge(entry, gfid)
                if isinstance(er, int):
                    time.sleep(1)
                else:
                    break
        elif op == "CREATE":
            blob = entry_pack_reg(gfid, bname, e["stat"])
        elif op == "MKDIR":
            blob = entry_pack_mkdir(gfid, bname, e["stat"])
        elif op == "LINK":
            errno_wrap(os.link, [os.path.join(pfx, gfid), entry], [ENOENT, EEXIST])
        elif op == "SYMLINK":
            blob = entry_pack_symlink(gfid, bname, e["link"], e["stat"])
        elif op == "RENAME":
            en = e["entry1"]
            errno_wrap(os.rename, [entry, en], [ENOENT, EEXIST])
        if blob:
            errno_wrap(Xattr.lsetxattr_l, [pg, "glusterfs.gfid.newfile", blob], [ENOENT, EEXIST])
Example 14: gfid
def gfid(cls, gfidpath):
    return errno_wrap(Xattr.lgetxattr, [gfidpath, "glusterfs.gfid", cls.GX_GFID_CANONICAL_LEN], [ENOENT])