本文整理汇总了Python中snakeoil.osutils.pjoin函数的典型用法代码示例。如果您正苦于以下问题:Python pjoin函数的具体用法?Python pjoin怎么用?Python pjoin使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pjoin函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_parents
def test_parents(self):
    """A child profile with a 'parent' file of '..' resolves to exactly one parent node."""
    parent_path = pjoin(self.dir, self.profile)
    os.mkdir(pjoin(parent_path, "child"))
    self.write_file("parent", "..", profile="%s/child" % self.profile)
    node = self.klass(pjoin(parent_path, "child"))
    self.assertEqual(1, len(node.parents))
    self.assertEqual(node.parents[0].path, parent_path)
示例2: test_packages
def test_packages(self):
    """Exercise 'packages' file parsing: empty files, comments, visibility and system entries."""
    node = self.klass(pjoin(self.dir, self.profile))
    self.assertEqual(node.system, empty)
    self.assertEqual(node.visibility, empty)
    self.parsing_checks("packages", "system")
    # a comment-only file still parses to empty sets
    self.write_file("packages", "#foo\n")
    node = self.klass(pjoin(self.dir, self.profile))
    self.assertEqual(node.visibility, empty)
    self.assertEqual(node.system, empty)
    # a plain atom lands in visibility (version-negated), not in system
    self.write_file("packages", "#foo\ndev-util/diffball\n")
    node = self.klass(pjoin(self.dir, self.profile))
    self.assertEqual(node.system, empty)
    self.assertEqual(list(node.visibility), [(), (atom("dev-util/diffball", negate_vers=True),)])
    # '*' prefixes mark system entries, '-' negates; a mixed file splits accordingly
    self.write_file("packages", "-dev-util/diffball\ndev-foo/bar\n*dev-sys/atom\n" "-*dev-sys/atom2\nlock-foo/dar")
    node = self.klass(pjoin(self.dir, self.profile))
    self.assertEqual(node.system, ((atom("dev-sys/atom2"),), (atom("dev-sys/atom"),)))
    self.assertEqual(
        [set(x) for x in node.visibility],
        [
            {atom("dev-util/diffball", negate_vers=True)},
            {atom("dev-foo/bar", negate_vers=True), atom("lock-foo/dar", negate_vers=True)},
        ],
    )
    self.simple_eapi_awareness_check("packages", "system")
示例3: parsing_checks
def parsing_checks(self, filename, attr, data="", line_negation=True):
    """Shared helper: *attr* parses cleanly for *data*, while a bare '-' raises ProfileError."""
    profile_path = pjoin(self.dir, self.profile)
    self.write_file(filename, data)
    # accessing the attribute forces the file to be parsed
    getattr(self.klass(profile_path), attr)
    self.write_file(filename, "-")
    self.assertRaises(profiles.ProfileError, getattr, self.klass(profile_path), attr)
    self.wipe_path(pjoin(profile_path, filename))
示例4: fetch_one
def fetch_one(self, fetchable, observer, retry=False):
    """Fetch and verify a single file, retrying once from upstream on checksum failure.

    :param fetchable: object describing the file to fetch
    :param observer: reporter used for error/status output
    :param retry: True when this call is already the refetch attempt
    :return: True if the file was fetched and verified, False otherwise
    """
    if fetchable.filename in self._basenames:
        return True
    # fetching files without uri won't fly
    # XXX hack atm, could use better logic but works for now
    try:
        verified_path = self.fetcher(fetchable)
    except fetch_errors.ChksumFailure as e:
        # checksum failed: move the bad file aside before trying again
        bad_path = pjoin(self.fetcher.distdir, fetchable.filename)
        failed_filename = f'{fetchable.filename}._failed_chksum_'
        failed_path = pjoin(self.fetcher.distdir, failed_filename)
        os.rename(bad_path, failed_path)
        if retry:
            # already retried once from upstream; give up
            raise
        observer.error(str(e))
        observer.error(f'renaming to {failed_filename!r} and refetching from upstream')
        observer.flush()
        # refetch directly from upstream
        return self.fetch_one(fetchable.upstream, observer, retry=True)
    except fetch_errors.FetchFailed:
        verified_path = None
    if verified_path is None:
        return False
    self.verified_files[verified_path] = fetchable
    self._basenames.add(fetchable.filename)
    return True
示例5: test_from_abspath
def test_from_abspath(self):
    """from_abspath should locate both the profiles base dir and the profile itself."""
    self.mk_profiles({"name": "profiles"}, {"name": "profiles/1"})
    profiles_base = pjoin(self.dir, "profiles")
    result = self.kls.from_abspath(pjoin(profiles_base, "1"))
    self.assertNotEqual(result, None)
    self.assertEqual(normpath(result.basepath), normpath(profiles_base))
    self.assertEqual(normpath(result.profile), normpath(pjoin(profiles_base, "1")))
示例6: run_check
def run_check(*args):
    # Copy the profile tree to a unique path on every invocation; the
    # unique pathway sidesteps any potential ProfileNode instance caching.
    target = pjoin(self.dir, 'foo', str(counter.next()))
    shutil.copytree(pjoin(self.dir, 'foo'), target, symlinks=True)
    return self.process_check(target, list(args))
示例7: config_from_make_conf
def config_from_make_conf(location="/etc/"):
    """
    generate a config from a file location

    :param location: location the portage configuration is based in,
        defaults to /etc
    """
    # this actually differs from portage parsing- we allow
    # make.globals to provide vars used in make.conf, portage keeps
    # them seperate (kind of annoying)
    # PORTAGE_CONFIGROOT relocates the whole config tree (chroot-style setups)
    config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
    base_path = pjoin(config_root, location.strip("/"))
    portage_base = pjoin(base_path, "portage")

    # this isn't preserving incremental behaviour for features/use
    # unfortunately
    conf_dict = {}
    try:
        load_make_config(conf_dict, pjoin(base_path, 'make.globals'))
    except errors.ParsingError, e:  # NOTE: Python 2 except syntax
        # a missing make.globals is fine; re-raise real parse failures
        if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
            raise
    try:
        # fall back to the system-wide make.globals shipped with portage
        load_make_config(conf_dict,
            pjoin(config_root, 'usr/share/portage/config/make.globals'))
    except compatibility.IGNORED_EXCEPTIONS:
        raise
    except:
        # any other failure means no usable make.globals was found
        compatibility.raise_from(errors.ParsingError(
            "failed to find a usable make.globals"))
示例8: trigger
def trigger(self, engine, existing_cset, install_cset):
    """Rename installs that would clobber protected files to '._cfg%04i_<name>'.

    :param engine: merge engine supplying the filesystem offset
    :param existing_cset: contents set of files already on disk
    :param install_cset: contents set about to be merged; mutated in place
    """
    # hackish, but it works.
    protected_filter = gen_config_protect_filter(
        engine.offset, self.extra_protects, self.extra_disables).match
    ignore_filter = gen_collision_ignore_filter(engine.offset).match
    protected = {}
    # collect installs that differ (by chksum) from an existing protected
    # file, grouped by on-disk target directory
    for x in existing_cset.iterfiles():
        if not ignore_filter(x.location) and protected_filter(x.location):
            replacement = install_cset[x]
            if not simple_chksum_compare(replacement, x):
                protected.setdefault(
                    pjoin(engine.offset,
                        os.path.dirname(x.location).lstrip(os.path.sep)),
                    []).append((os.path.basename(replacement.location),
                        replacement))
    for dir_loc, entries in protected.iteritems():  # NOTE: Python 2 dict iteration
        updates = {x[0]: [] for x in entries}
        try:
            # gather the '._cfg' entries already present in this directory
            existing = sorted(x for x in listdir_files(dir_loc)
                if x.startswith("._cfg"))
        except OSError as oe:
            if oe.errno != errno.ENOENT:
                raise
            # this shouldn't occur.
            continue
        for x in existing:
            try:
                # ._cfg0000_filename
                count = int(x[5:9])
                if x[9] != "_":
                    raise ValueError
                fn = x[10:]
            except (ValueError, IndexError):
                # malformed ._cfg name; skip it
                continue
            if fn in updates:
                updates[fn].append((count, fn))
        # now we rename.
        for fname, entry in entries:
            # check for any updates with the same chksums.
            count = 0
            for cfg_count, cfg_fname in updates[fname]:
                if simple_chksum_compare(livefs.gen_obj(
                        pjoin(dir_loc, cfg_fname)), entry):
                    # identical content already staged; reuse its slot
                    count = cfg_count
                    break
                count = max(count, cfg_count + 1)
            try:
                install_cset.remove(entry)
            except KeyError:
                # this shouldn't occur...
                continue
            new_fn = pjoin(dir_loc, "._cfg%04i_%s" % (count, fname))
            new_entry = entry.change_attributes(location=new_fn)
            install_cset.add(new_entry)
            # remember the rename so later stages can report/undo it
            self.renames[new_entry] = entry
        del updates
示例9: _visibility_limiters
def _visibility_limiters(self):
    """Parse profiles/package.mask into (negated, positive) atom tuples.

    :return: tuple of (tuple of '-'-prefixed atoms, tuple of plain atoms)
    :raise profiles.ProfileError: on an empty negation or a malformed atom
    """
    mask_path = pjoin(self.base, 'profiles', 'package.mask')
    masked, unmasked = [], []
    try:
        # some profile formats treat package.mask as a directory of files
        if (self.config.eapi.options['has_profile_data_dirs'] or
                self.config.profile_formats.intersection(['portage-1', 'portage-2'])):
            mask_files = sorted_scan(mask_path)
        else:
            mask_files = [mask_path]
        for mask_path in mask_files:
            for line in iter_read_bash(mask_path):
                line = line.strip()
                if line in ('-', ''):
                    raise profiles.ProfileError(
                        pjoin(self.base, 'profiles'),
                        'package.mask', "encountered empty negation: -")
                if line.startswith('-'):
                    unmasked.append(atom.atom(line[1:]))
                else:
                    masked.append(atom.atom(line))
    except FileNotFoundError:
        # no package.mask at all is perfectly valid
        pass
    except ebuild_errors.MalformedAtom as e:
        raise profiles.ProfileError(
            pjoin(self.base, 'profiles'), 'package.mask', e) from e
    return tuple(unmasked), tuple(masked)
示例10: _add_sets
def _add_sets(self):
    """Register the standard package-set config sections plus user-defined file sets."""
    # built-in sets: world file, profile system set, and installed-pkg views
    builtin_sets = {
        "world": {
            "class": "pkgcore.pkgsets.filelist.WorldFile",
            "location": pjoin(self.root, econst.WORLD_FILE.lstrip('/'))},
        "system": {
            "class": "pkgcore.pkgsets.system.SystemSet",
            "profile": "profile"},
        "installed": {
            "class": "pkgcore.pkgsets.installed.Installed",
            "vdb": "vdb"},
        "versioned-installed": {
            "class": "pkgcore.pkgsets.installed.VersionedInstalled",
            "vdb": "vdb"},
    }
    for section_name, section in builtin_sets.items():
        self[section_name] = basics.AutoConfigSection(section)
    sets_dir = pjoin(self.dir, "sets")
    try:
        for setname in listdir_files(sets_dir):
            # Potential for name clashes here, those will just make
            # the set not show up in config.
            if setname in ("system", "world"):
                logger.warning(
                    "user defined set %r is disallowed; ignoring",
                    pjoin(sets_dir, setname))
                continue
            self[setname] = basics.AutoConfigSection({
                "class": "pkgcore.pkgsets.filelist.FileList",
                "location": pjoin(sets_dir, setname)})
    except FileNotFoundError:
        # no user sets directory; nothing more to register
        pass
示例11: _internal_offset_iter_scan
def _internal_offset_iter_scan(path, chksum_handlers, offset, stat_func=os.lstat,
                               hidden=True, backup=True):
    """Breadth-first scan of *path*, yielding fs objects located relative to *offset*.

    :param path: absolute directory to walk
    :param chksum_handlers: checksum handlers forwarded to gen_obj
    :param offset: prefix stripped from every yielded location
    :param stat_func: stat function used for the scan root entry
    :param hidden: if False, skip dotfiles
    :param backup: if False, skip editor backup files ('~' suffix)
    """
    offset = normpath(offset)
    root_rel = normpath(path)[len(offset):]
    pending = collections.deque([root_rel])
    if pending[0]:
        # the scan root itself lies below the offset; yield it first
        yield gen_obj(pending[0], chksum_handlers=chksum_handlers,
                      stat_func=stat_func)
    sep = os.path.sep
    while pending:
        rel_dir = pending.popleft()
        abs_dir = pjoin(offset, rel_dir.lstrip(sep))
        rel_dir = rel_dir.rstrip(sep) + sep
        for entry in listdir(abs_dir):
            if ((not hidden and entry.startswith('.'))
                    or (not backup and entry.endswith('~'))):
                continue
            rel_path = pjoin(rel_dir, entry)
            # NOTE(review): children are stat'ed with os.lstat rather than the
            # stat_func parameter, mirroring the original code — confirm intended.
            obj = gen_obj(rel_path, chksum_handlers=chksum_handlers,
                          real_location=pjoin(abs_dir, entry),
                          stat_func=os.lstat)
            yield obj
            if obj.is_dir:
                pending.append(rel_path)
示例12: _caching_grab_virtuals
def _caching_grab_virtuals(repo, cache_basedir):
    """Build the repo's virtuals mapping, reusing an mtime-keyed on-disk cache.

    :param repo: repository to scan for virtual providers
    :param cache_basedir: directory holding 'virtuals.cache'
    :return: tuple of (default providers, virtuals mapping)
    """
    virtuals = {}
    update = False
    cache = _read_mtime_cache(pjoin(cache_basedir, 'virtuals.cache'))
    existing = _get_mtimes(repo.location)
    for cat, mtime in existing.iteritems():  # NOTE: Python 2 dict iteration
        d = cache.pop(cat, None)
        # reuse the cached entry only while the category's mtime is unchanged
        if d is not None and long(d[0]) == long(mtime):
            d = _convert_cached_virtuals(d)
            if d is not None:
                _merge_virtuals(virtuals, d)
                continue
        # cache miss or stale entry: rescan this category's packages
        update = True
        _collect_virtuals(virtuals, repo.itermatch(
            packages.PackageRestriction("category",
                values.StrExactMatch(cat))))
    # rewrite the cache if anything was rescanned, or if stale keys remain
    if update or cache:
        _write_mtime_cache(existing, virtuals,
            pjoin(cache_basedir, 'virtuals.cache'))
    defaults = _collect_default_providers(virtuals)
    # _finalize_virtuals(virtuals)
    return defaults, virtuals
示例13: _clean_old_caches
def _clean_old_caches(path):
    """Best-effort removal of plugin cache files left behind by older versions.

    :param path: directory the old caches live in
    """
    for name in ('plugincache2',):
        try:
            osutils.unlink_if_exists(pjoin(path, name))
        except EnvironmentError, e:  # NOTE: Python 2 except syntax
            # cleanup is best-effort: log the failure and keep going
            logger.error("attempting to clean old plugin cache %r failed with %s",
                pjoin(path, name), e)
示例14: process_subcommands
def process_subcommands(self, parser, name, action_group):
    """Emit reST lines (and recurse into child doc pages) for a subcommand group.

    :param parser: argparse parser owning the subcommand group
    :param name: command name used in the toctree link text
    :param action_group: argparse action group holding the subparsers action
    :return: list of reST lines for this group
    """
    l = []
    h = self._get_formatter(parser, name)
    h.add_arguments(action_group._group_actions)
    data = h.format_help().strip()
    if data:
        # a subcommand group is expected to hold exactly the subparsers action
        assert len(action_group._group_actions) == 1
        l.extend(_rst_header("=", action_group.title))
        if action_group.description:
            l.extend(action_group.description.split("\n"))
        # recurse: generate a standalone doc page for each subcommand
        for subcommand, parser in action_group._group_actions[0].choices.iteritems():  # NOTE: py2
            subdir_path = self.name.split()[1:]
            base = pjoin(self.base_path, *subdir_path)
            self.__class__(base, "%s %s" % (
                self.name, subcommand), parser, mtime=self.mtime, out_name=subcommand).run()
        # NOTE(review): subdir_path is only bound inside the loop above; if
        # choices is empty this raises NameError — confirm choices is nonempty.
        toc_path = self.name.split()
        if subdir_path:
            toc_path = subdir_path
        l.append('')
        l.append(".. toctree::")
        l.append(" :maxdepth: 2")
        l.append('')
        l.extend(" %s %s <%s>" %
            (name, subcommand, pjoin(*list(toc_path + [subcommand])))
            for subcommand in action_group._group_actions[0].choices)
        l.append('')
    return l
示例15: _split
def _split(self, iterable, observer, engine, cset):
    """Split debug info out of binaries into the debug store via objcopy.

    :param iterable: (fs_objs, ftype) pairs; fs_objs are hardlinked copies of
        one file, ftype its file-type string
    :param observer: reporter for info/warn/debug output
    :param engine: merge engine supplying the filesystem offset
    :param cset: contents set used to skip already-present debug files
    """
    debug_store = pjoin(engine.offset, self._debug_storage.lstrip('/'))
    objcopy_args = [self.objcopy_binary, '--only-keep-debug']
    if self._compress:
        objcopy_args.append('--compress-debug-sections')
    for fs_objs, ftype in iterable:
        # static archives get no separate debug file
        if 'ar archive' in ftype:
            continue
        if 'relocatable' in ftype:
            # relocatable objects are only split when they're kernel modules
            if not any(x.basename.endswith(".ko") for x in fs_objs):
                continue
        fs_obj = fs_objs[0]
        debug_loc = pjoin(debug_store, fs_obj.location.lstrip('/') + ".debug")
        if debug_loc in cset:
            # already split on a previous pass
            continue
        fpath = fs_obj.data.path
        debug_ondisk = pjoin(os.path.dirname(fpath),
            os.path.basename(fpath) + ".debug")
        # note that we tell the UI the final pathway- not the intermediate one.
        observer.info("splitdebug'ing %s into %s" %
            (fs_obj.location, debug_loc))
        # extract the debug sections into a sibling .debug file
        ret = spawn.spawn(objcopy_args + [fpath, debug_ondisk])
        if ret != 0:
            observer.warn("splitdebug'ing %s failed w/ exitcode %s" %
                (fs_obj.location, ret))
            continue
        # note that the given pathway to the debug file /must/ be relative to ${D};
        # it must exist at the time of invocation.
        ret = spawn.spawn([self.objcopy_binary,
            '--add-gnu-debuglink', debug_ondisk, fpath])
        if ret != 0:
            observer.warn("splitdebug created debug file %r, but "
                "failed adding links to %r (%r)" % (debug_ondisk, fpath, ret))
            observer.debug("failed splitdebug command was %r",
                (self.objcopy_binary, '--add-gnu-debuglink', debug_ondisk, fpath))
            continue
        debug_obj = gen_obj(debug_loc, real_location=debug_ondisk,
            uid=os_data.root_uid, gid=os_data.root_gid)
        stripped_fsobj = self._strip_fsobj(fs_obj, ftype, observer, quiet=True)
        self._modified.add(stripped_fsobj)
        self._modified.add(debug_obj)
        # hardlinked siblings share the same debug object at their own locations
        for fs_obj in fs_objs[1:]:
            debug_loc = pjoin(debug_store,
                fs_obj.location.lstrip('/') + ".debug")
            linked_debug_obj = debug_obj.change_attributes(location=debug_loc)
            observer.info("splitdebug hardlinking %s to %s" %
                (debug_obj.location, debug_loc))
            self._modified.add(linked_debug_obj)
            self._modified.add(stripped_fsobj.change_attributes(
                location=fs_obj.location))