This article collects typical usage examples of the Python function mercurial.scmutil.opener. If you are wondering what opener does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following section presents 13 code examples of the opener function, ordered by popularity.
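Before the examples, here is a minimal sketch of what scmutil.opener itself provides in the Mercurial versions these snippets target (roughly 1.9 up to the releases where it became an alias of scmutil.vfs): scmutil.opener(root) returns a callable object rooted at root whose call signature mirrors the built-in open(), but which resolves every path relative to that root and creates missing parent directories for write modes. The scratch directory and file names below are assumptions chosen purely for illustration.

import tempfile

from mercurial import scmutil

# Scratch directory assumed purely for this demonstration.
root = tempfile.mkdtemp(prefix='opener-demo-')

opener = scmutil.opener(root)            # path-checking opener rooted at 'root'

# Write a file addressed relative to 'root'; parent directories are
# created automatically for write modes.
fp = opener('notes/hello.txt', 'w')
fp.write('hello\n')
fp.close()

print opener('notes/hello.txt').read()   # prints 'hello'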
Example 1: __init__
def __init__(self, ui, path):
    checktool('svn', debname='subversion')
    checktool('svnadmin', debname='subversion')

    converter_sink.__init__(self, ui, path)
    commandline.__init__(self, ui, 'svn')
    self.delete = []
    self.setexec = []
    self.delexec = []
    self.copies = []
    self.wc = None
    self.cwd = os.getcwd()

    created = False
    if os.path.isfile(os.path.join(path, '.svn', 'entries')):
        self.wc = os.path.realpath(path)
        self.run0('update')
    else:
        if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path):
            path = os.path.realpath(path)
            if os.path.isdir(os.path.dirname(path)):
                if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
                    ui.status(_('initializing svn repository %r\n') %
                              os.path.basename(path))
                    commandline(ui, 'svnadmin').run0('create', path)
                    created = path
                path = util.normpath(path)
                if not path.startswith('/'):
                    path = '/' + path
                path = 'file://' + path

        wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
        ui.status(_('initializing svn working copy %r\n')
                  % os.path.basename(wcpath))
        self.run0('checkout', path, wcpath)

        self.wc = wcpath
    self.opener = scmutil.opener(self.wc)
    self.wopener = scmutil.opener(self.wc)
    self.childmap = mapfile(ui, self.join('hg-childmap'))
    self.is_exec = util.checkexec(self.wc) and util.isexec or None

    if created:
        hook = os.path.join(created, 'hooks', 'pre-revprop-change')
        fp = open(hook, 'w')
        fp.write(pre_revprop_change)
        fp.close()
        util.setflags(hook, False, True)

    output = self.run0('info')
    self.uuid = self.uuid_re.search(output).group(1).strip()
Example 2: __init__
def __init__(self, ui, repo):
    self.ui = ui
    self.path = repo.join('transplant')
    self.opener = scmutil.opener(self.path)
    self.transplants = transplants(self.path, 'transplants',
                                   opener=self.opener)
    self.editor = None
Example 3: openlfdirstate
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it.  This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(repo.wjoin(lfile)):
                    lfdirstate.normal(lfile)
            except OSError, err:
                if err.errno != errno.ENOENT:
                    raise
Example 4: basic
def basic(repo):
    # file doesn't exist, calls function
    repo.cached

    repo.invalidate()
    # file still doesn't exist, uses cache
    repo.cached

    # create empty file
    f = open('x', 'w')
    f.close()
    repo.invalidate()
    # should recreate the object
    repo.cached

    f = open('x', 'w')
    f.write('a')
    f.close()
    repo.invalidate()
    # should recreate the object
    repo.cached

    repo.invalidate()
    # stats file again, nothing changed, reuses object
    repo.cached

    # atomic replace file, size doesn't change
    # hopefully st_mtime doesn't change as well so this doesn't use the cache
    # because of inode change
    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('b')
    f.close()

    repo.invalidate()
    repo.cached
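Examples 4 and 13 exercise Mercurial's filecache machinery, and both assume a repo fixture whose cached property is backed by a file named x (plus y in Example 13). That fixture is not shown on this page; the sketch below is an assumption of what such a fixture could look like, modelled on scmutil.filecache, not the exact code of the upstream test.

from mercurial import scmutil

class fakerepo(object):
    """Hypothetical stand-in for the 'repo' argument used by basic()."""

    def __init__(self):
        self._filecache = {}

    def join(self, p):
        # scmutil.filecache resolves its path argument through join()
        return p

    @scmutil.filecache('x')
    def cached(self):
        print 'creating'

    def invalidate(self):
        # forget cached values so the next access re-stats the backing file
        for k in self._filecache:
            try:
                delattr(self, k)
            except AttributeError:
                pass

basic(fakerepo())   # run the checks from Example 4 in an empty scratch directory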
Example 5: __init__
def __init__(self, ui, baseui, path, patchdir=None):
    self.basepath = path
    try:
        fh = open(os.path.join(path, 'patches.queue'))
        cur = fh.read().rstrip()
        fh.close()
        if not cur:
            curpath = os.path.join(path, 'patches')
        else:
            curpath = os.path.join(path, 'patches-' + cur)
    except IOError:
        curpath = os.path.join(path, 'patches')
    self.path = patchdir or curpath
    self.opener = scmutil.opener(self.path)
    self.ui = ui
    self.baseui = baseui
    self.applieddirty = False
    self.seriesdirty = False
    self.added = []
    self.seriespath = "series"
    self.statuspath = "status"
    self.guardspath = "guards"
    self.activeguards = None
    self.guardsdirty = False
    # Handle mq.git as a bool with extended values
    try:
        gitmode = ui.configbool('mq', 'git', None)
        if gitmode is None:
            raise error.ConfigError
        self.gitmode = gitmode and 'yes' or 'no'
    except error.ConfigError:
        self.gitmode = ui.config('mq', 'git', 'auto').lower()
    self.plainmode = ui.configbool('mq', 'plain', False)
Example 6: openlfdirstate
def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = scmutil.opener(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it.  This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, [], False, False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate
Example 7: openlfdirstate
def openlfdirstate(ui, repo):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    admin = repo.join(longname)
    opener = scmutil.opener(admin)
    lfdirstate = largefiles_dirstate(opener, ui, repo.root,
                                     repo.dirstate._validate)

    # If the largefiles dirstate does not exist, populate and create
    # it.  This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.  It also gives us an easy
    # way to forcibly rebuild largefiles state:
    #   rm .hg/largefiles/dirstate && hg status
    # Or even, if things are really messed up:
    #   rm -rf .hg/largefiles && hg status
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = getstandinmatcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            lfile = splitstandin(standin)
            hash = readstandin(repo, lfile)
            lfdirstate.normallookup(lfile)
            try:
                if hash == hashfile(lfile):
                    lfdirstate.normal(lfile)
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
        lfdirstate.write()
Example 8: __init__
def __init__(self, ui, repo, opts):
    self.ui = ui
    self.path = repo.join('transplant')
    self.opener = scmutil.opener(self.path)
    self.transplants = transplants(self.path, 'transplants',
                                   opener=self.opener)

    def getcommiteditor():
        editform = cmdutil.mergeeditform(repo[None], 'transplant')
        return cmdutil.getcommiteditor(editform=editform, **opts)
    self.getcommiteditor = getcommiteditor
Example 9: _open_bfdirstate
def _open_bfdirstate(ui, repo, correct=True):
    '''
    Return a dirstate object that tracks big files: i.e. its root is the
    repo root, but it is saved in .hg/bfiles/dirstate.
    '''
    admin = repo.join('bfiles')
    opener = scmutil.opener(admin)
    bfdirstate = open_dirstate(opener, ui, repo.root)

    # If the bfiles dirstate does not exist, populate and create it.  This
    # ensures that we create it on the first meaningful bfiles operation in
    # a new clone.  It also gives us an easy way to forcibly rebuild bfiles
    # state:
    #   rm .hg/bfiles/dirstate && hg bfstatus
    # Or even, if things are really messed up:
    #   rm -rf .hg/bfiles && hg bfstatus
    # (although that can lose data, e.g. pending big file revisions in
    # .hg/bfiles/{pending,committed}).
    if not os.path.exists(os.path.join(admin, 'dirstate')):
        util.makedirs(admin)
        matcher = _get_standin_matcher(repo)
        for standin in dirstate_walk(repo.dirstate, matcher):
            bigfile = _split_standin(standin)
            state = repo.dirstate[standin]
            if state == 'r':
                bfdirstate.remove(bigfile)
                continue
            hash = _read_standin(repo, standin)
            try:
                curhash = _hashfile(open(bigfile, 'rb'))
            except IOError, err:
                if err.errno == errno.ENOENT:
                    bfdirstate.normallookup(bigfile)
                else:
                    raise
            else:
                if curhash == hash:
                    bfdirstate.normal(bigfile)
                else:
                    bfdirstate.normallookup(bigfile)
        bfdirstate.write()
Example 10: __init__
def __init__(self, ui, path, patchdir=None):
    """initializes everything, this was copied from mq"""
    self.basepath = path
    self.path = patchdir or os.path.join(path, 'attic')
    if scmutil:
        # since version 1.9 scmutil.opener is the right function
        self.opener = scmutil.opener(self.path, False)
    else:
        # we are at an older version, fall back
        self.opener = util.opener(self.path, False)
    self.ui = ui
    self.applied = ''
    self.appliedfile = '.applied'
    self.currentpatch = ''
    self.currentfile = '.current'

    if not os.path.isdir(self.path):
        try:
            os.mkdir(self.path)
        except OSError, inst:
            # ignore a race where the directory was created concurrently
            if inst.errno != errno.EEXIST:
                raise
Example 11: snapshot
def snapshot(ui, repo, files, node, tmproot):
    '''snapshot files as of some revision
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.'''
    dirname = os.path.basename(repo.root)
    if dirname == "":
        dirname = "root"
    if node is not None:
        dirname = '%s.%s' % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    if node is not None:
        ui.note(_('making snapshot of %d files from rev %s\n') %
                (len(files), short(node)))
    else:
        ui.note(_('making snapshot of %d files from working directory\n')
                % (len(files)))
    wopener = scmutil.opener(base)
    fns_and_mtime = []
    ctx = repo[node]
    for fn in sorted(files):
        wfn = util.pconvert(fn)
        if wfn not in ctx:
            # File doesn't exist; could be a bogus modify
            continue
        ui.note('  %s\n' % wfn)
        dest = os.path.join(base, wfn)
        fctx = ctx[wfn]
        data = repo.wwritedata(wfn, fctx.data())
        if 'l' in fctx.flags():
            wopener.symlink(data, wfn)
        else:
            wopener.write(wfn, data)
            if 'x' in fctx.flags():
                util.setflags(dest, False, True)
        if node is None:
            fns_and_mtime.append((dest, repo.wjoin(fn),
                                  os.lstat(dest).st_mtime))
    return dirname, fns_and_mtime
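Example 11's snapshot() comes from the extdiff extension. For context, a hedged sketch of one possible call site follows; the file list, temporary-directory prefix, and the ui/repo objects assumed in scope are illustrations only, not the extension's actual command code.

import tempfile

# Hypothetical invocation: snapshot two tracked files at the working
# directory's parent revision into a scratch directory.
tmproot = tempfile.mkdtemp(prefix='extdiff.')
dirname, fns_and_mtime = snapshot(ui, repo, ['a.txt', 'b.txt'],
                                  repo['.'].node(), tmproot)

# fns_and_mtime is only populated when node is None (working-directory
# snapshots); extdiff uses it to copy files the diff tool edited back
# into the working copy.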
Example 12: shrink
def shrink(ui, repo, **opts):
    """shrink a revlog by reordering revisions

    Rewrites all the entries in some revlog of the current repository
    (by default, the manifest log) to save space.

    Different sort algorithms have different performance
    characteristics.  Use ``--sort`` to select a sort algorithm so you
    can determine which works best for your data.
    """

    if not repo.local():
        raise util.Abort(_('not a local repository: %s') % repo.root)

    fn = opts.get('revlog')
    if not fn:
        indexfn = repo.sjoin('00manifest.i')
    else:
        if not fn.endswith('.i'):
            raise util.Abort(_('--revlog option must specify the revlog index '
                               'file (*.i), not %s') % opts.get('revlog'))

        indexfn = os.path.realpath(fn)
        store = repo.sjoin('')
        if not indexfn.startswith(store):
            raise util.Abort(_('--revlog option must specify a revlog in %s, '
                               'not %s') % (store, indexfn))

    sortname = opts['sort']
    try:
        toposort = globals()['toposort_' + sortname]
    except KeyError:
        raise util.Abort(_('no such toposort algorithm: %s') % sortname)

    if not os.path.exists(indexfn):
        raise util.Abort(_('no such file: %s') % indexfn)
    if '00changelog' in indexfn:
        raise util.Abort(_('shrinking the changelog '
                           'will corrupt your repository'))

    ui.write(_('shrinking %s\n') % indexfn)
    tmpindexfn = util.mktempcopy(indexfn, emptyok=True)

    r1 = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), indexfn)
    r2 = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), tmpindexfn)

    datafn, tmpdatafn = r1.datafile, r2.datafile
    oldindexfn = indexfn + '.old'
    olddatafn = datafn + '.old'
    if os.path.exists(oldindexfn) or os.path.exists(olddatafn):
        raise util.Abort(_('one or both of\n'
                           '  %s\n'
                           '  %s\n'
                           'exists from a previous run; please clean up '
                           'before running again') % (oldindexfn, olddatafn))

    # Don't use repo.transaction(), because then things get hairy with
    # paths: some need to be relative to .hg, and some need to be
    # absolute.  Doing it this way keeps things simple: everything is an
    # absolute path.
    lock = repo.lock(wait=False)
    tr = transaction.transaction(ui.warn,
                                 open,
                                 repo.sjoin('journal'))

    def ignoremissing(func):
        def f(*args, **kw):
            try:
                return func(*args, **kw)
            except OSError, inst:
                if inst.errno != errno.ENOENT:
                    raise
        return f
Example 13: basic
def basic(repo):
    print "* neither file exists"
    # calls function
    repo.cached

    repo.invalidate()
    print "* neither file still exists"
    # uses cache
    repo.cached

    # create empty file
    f = open('x', 'w')
    f.close()
    repo.invalidate()
    print "* empty file x created"
    # should recreate the object
    repo.cached

    f = open('x', 'w')
    f.write('a')
    f.close()
    repo.invalidate()
    print "* file x changed size"
    # should recreate the object
    repo.cached

    repo.invalidate()
    print "* nothing changed with either file"
    # stats file again, reuses object
    repo.cached

    # atomic replace file, size doesn't change
    # hopefully st_mtime doesn't change as well so this doesn't use the cache
    # because of inode change
    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('b')
    f.close()

    repo.invalidate()
    print "* file x changed inode"
    repo.cached

    # create empty file y
    f = open('y', 'w')
    f.close()
    repo.invalidate()
    print "* empty file y created"
    # should recreate the object
    repo.cached

    f = open('y', 'w')
    f.write('A')
    f.close()
    repo.invalidate()
    print "* file y changed size"
    # should recreate the object
    repo.cached

    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('B')
    f.close()
    repo.invalidate()
    print "* file y changed inode"
    repo.cached

    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('c')
    f.close()
    f = scmutil.opener('.')('y', 'w', atomictemp=True)
    f.write('C')
    f.close()
    repo.invalidate()
    print "* both files changed inode"
    repo.cached