This article collects typical usage examples of the partial function from Python's snakeoil.currying module, answering the usual questions: what is partial for, and how is it used in practice?
The following 15 code examples of the partial function are shown below, drawn from real-world code and sorted by popularity by default.
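Before the project examples, here is a minimal sketch of the core behavior (assuming snakeoil.currying.partial behaves like functools.partial, which it has historically been an alias for): partial(f, *args, **kwargs) returns a new callable with those leading arguments pre-bound.

from snakeoil.currying import partial

def greet(greeting, name):
    return "%s, %s" % (greeting, name)

hello = partial(greet, "hello")  # pre-bind the first positional argument
print(hello("world"))  # -> hello, world
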
Example 1: __init__
def __init__(self, raw_repo, domain, domain_settings, fetcher=None):
"""
:param raw_repo: :obj:`_UnconfiguredTree` instance
:param domain_settings: environment settings to bind
:param fetcher: :obj:`pkgcore.fetch.base.fetcher` instance to use
for getting access to fetchable files
"""
if "USE" not in domain_settings:
raise errors.InitializationError(
"%s requires the following settings: 'USE', not supplied" % (
self.__class__,))
elif 'CHOST' not in domain_settings:
raise errors.InitializationError(
"%s requires the following settings: 'CHOST', not supplied" % (
self.__class__,))
chost = domain_settings['CHOST']
scope_update = {'chost': chost}
scope_update.update((x, domain_settings.get(x.upper(), chost))
for x in ('cbuild', 'ctarget'))
scope_update['operations_callback'] = self._generate_pkg_operations
self.config_wrappables['iuse_effective'] = partial(
self._generate_iuse_effective, domain.profile)
configured.tree.__init__(self, raw_repo, self.config_wrappables,
pkg_kls_injections=scope_update)
self._get_pkg_use = domain.get_package_use_unconfigured
self._get_pkg_use_for_building = domain.get_package_use_buildable
self.domain_settings = domain_settings
self.fetcher_override = fetcher
self._delayed_iuse = partial(make_kls(InvertedContains),
InvertedContains)
Example 2: __init__
def __init__(self, **kwargs):
kwargs.setdefault('observer', None)
self._triggers = []
for k, v in kwargs.iteritems():
if callable(v):
v = partial(v, self)
setattr(self, k, v)
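The idiom above turns plain functions passed as keyword arguments into bound-method-like callables by pre-binding self. The same trick in isolation (all names here are hypothetical; functools.partial stands in for snakeoil's):

from functools import partial  # stand-in for snakeoil.currying.partial

def log_event(owner, msg):
    print("%s: %s" % (owner.name, msg))

class Observer(object):
    name = "observer1"

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if callable(v):
                v = partial(v, self)  # bind this instance as the first arg
            setattr(self, k, v)

obs = Observer(notify=log_event)
obs.notify("merge finished")  # -> observer1: merge finished
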
Example 3: archive_to_fsobj
def archive_to_fsobj(src_tar):
psep = os.path.sep
for member in src_tar:
d = {
"uid":member.uid, "gid":member.gid,
"mtime":member.mtime, "mode":member.mode}
location = psep + member.name.strip(psep)
if member.isdir():
if member.name.strip(psep) == ".":
continue
yield fsDir(location, **d)
elif member.isreg():
d["data"] = invokable_data_source.wrap_function(partial(
src_tar.extractfile, member.name), returns_text=False,
returns_handle=True)
# suppress hardlinks until the rest of pkgcore is updated for it.
d["dev"] = None
d["inode"] = None
yield fsFile(location, **d)
elif member.issym() or member.islnk():
yield fsSymlink(location, member.linkname, **d)
elif member.isfifo():
yield fsFifo(location, **d)
elif member.isdev():
d["major"] = long(member.major)
d["minor"] = long(member.minor)
yield fsDev(location, **d)
else:
            raise AssertionError(
                "unknown type %r, %r was encountered walking tarmembers" %
                (member, member.type))
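Note how partial delays the actual extraction here: src_tar.extractfile is not called until the wrapped data source is read. The same deferral in isolation (the archive path and member name are hypothetical):

from functools import partial  # stand-in for snakeoil.currying.partial
import tarfile

tar = tarfile.open("pkg.tar")  # hypothetical archive
read_member = partial(tar.extractfile, "usr/bin/tool")  # nothing read yet
fileobj = read_member()  # extraction only happens at call time
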
Example 4: _cmd_implementation_sanity_check
def _cmd_implementation_sanity_check(self, domain):
pkg = self.pkg
eapi = pkg.eapi_obj
if eapi.options.has_required_use:
use = pkg.use
for node in pkg.required_use:
if not node.match(use):
print "REQUIRED_USE requirement weren't met\nFailed to match: %s\nfrom: %s\nfor USE: %s\npkg: %s" % \
(node, pkg.required_use, " ".join(use), pkg)
return False
if 'pretend' not in pkg.mandatory_phases:
return True
commands = {"request_inherit": partial(inherit_handler, self._eclass_cache)}
env = expected_ebuild_env(pkg)
env["ROOT"] = domain.root
try:
logger.debug("running ebuild pkg_pretend sanity check for %s", pkg)
start = time.time()
ret = run_generic_phase(pkg, "pretend", env, True, True, False,
extra_handlers=commands)
logger.debug("pkg_pretend sanity check for %s took %2.2f seconds",
pkg, time.time() - start)
return ret
except format.GenericBuildError, e:
logger.error("pkg_pretend sanity check for %s failed with exception %r"
% (pkg, e))
return False
Example 5: merge_contents
def merge_contents(cset, offset=None, callback=None):
"""
merge a :class:`pkgcore.fs.contents.contentsSet` instance to the livefs
:param cset: :class:`pkgcore.fs.contents.contentsSet` instance
:param offset: if not None, offset to prefix all locations with.
Think of it as target dir.
:param callback: callable to report each entry being merged; given a single arg,
the fs object being merged.
:raise EnvironmentError: Thrown for permission failures.
"""
if callback is None:
        callback = lambda obj: None
ensure_perms = get_plugin("fs_ops.ensure_perms")
copyfile = get_plugin("fs_ops.copyfile")
mkdir = get_plugin("fs_ops.mkdir")
if not isinstance(cset, contents.contentsSet):
raise TypeError("cset must be a contentsSet, got %r" % (cset,))
if offset is not None:
if os.path.exists(offset):
if not os.path.isdir(offset):
raise TypeError("offset must be a dir, or not exist: %s" % offset)
else:
mkdir(fs.fsDir(offset, strict=False))
iterate = partial(contents.offset_rewriter, offset.rstrip(os.path.sep))
else:
iterate = iter
d = list(iterate(cset.iterdirs()))
d.sort()
for x in d:
callback(x)
try:
            # we pass in the stat ourselves, using stat instead of the
            # lstat gen_obj uses internally; this is the equivalent of
            # "dereference that link"
obj = gen_obj(x.location, stat=os.stat(x.location))
if not fs.isdir(obj):
raise Exception(
"%s exists and needs to be a dir, but is a %s" %
(x.location, obj))
ensure_perms(x, obj)
except OSError, oe:
if oe.errno != errno.ENOENT:
raise
try:
# we do this form to catch dangling symlinks
mkdir(x)
except OSError, oe:
if oe.errno != errno.EEXIST:
raise
os.unlink(x.location)
mkdir(x)
ensure_perms(x)
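The `iterate` assignment above is a small strategy-selection idiom: partial bakes the offset into contents.offset_rewriter, so both branches produce a callable taking a single iterable, just like iter. A reduced sketch (offset_rewriter below is a hypothetical stand-in for the real one):

from functools import partial  # stand-in for snakeoil.currying.partial

def offset_rewriter(prefix, entries):  # hypothetical stand-in
    for entry in entries:
        yield prefix + "/" + entry.lstrip("/")

offset = "/mnt/target"
iterate = partial(offset_rewriter, offset) if offset else iter
for path in iterate(["/usr/bin/tool"]):
    print(path)  # -> /mnt/target/usr/bin/tool
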
Example 6: make_keywords_filter
def make_keywords_filter(self, arch, default_keys, accept_keywords,
profile_keywords, incremental=False):
"""Generates a restrict that matches iff the keywords are allowed."""
if not accept_keywords and not profile_keywords:
return packages.PackageRestriction(
"keywords", values.ContainmentMatch(*default_keys))
if "~" + arch.lstrip("~") not in default_keys:
# stable; thus empty entries == ~arch
unstable = "~" + arch
def f(r, v):
if not v:
return r, unstable
return r, v
data = collapsed_restrict_to_data(
((packages.AlwaysTrue, default_keys),),
(f(*i) for i in accept_keywords))
else:
if incremental:
f = collapsed_restrict_to_data
else:
f = non_incremental_collapsed_restrict_to_data
data = f(((packages.AlwaysTrue, default_keys),), accept_keywords)
if incremental:
raise NotImplementedError(self.incremental_apply_keywords_filter)
#f = self.incremental_apply_keywords_filter
else:
f = self.apply_keywords_filter
return delegate(partial(f, data, profile_keywords))
Example 7: __init__
def __init__(self, dbs, per_repo_strategy,
global_strategy=None,
depset_reorder_strategy=None,
process_built_depends=False,
drop_cycles=False, debug=False, debug_handle=None):
if debug_handle is None:
debug_handle = sys.stdout
self.debug_handler = debug_handle
self._dprint = partial(dprint, debug_handle)
if not isinstance(dbs, (list, tuple)):
dbs = [dbs]
if global_strategy is None:
global_strategy = self.default_global_strategy
if depset_reorder_strategy is None:
depset_reorder_strategy = self.default_depset_reorder_strategy
self.depset_reorder = depset_reorder_strategy
self.per_repo_strategy = per_repo_strategy
self.total_ordering_strategy = global_strategy
self.all_raw_dbs = [misc.caching_repo(x, self.per_repo_strategy) for x in dbs]
self.all_dbs = global_strategy(self, self.all_raw_dbs)
self.default_dbs = self.all_dbs
self.state = state.plan_state()
vdb_state_filter_restrict = MutableContainmentRestriction(self.state.vdb_filter)
self.livefs_dbs = multiplex.tree(
*[visibility.filterTree(x, vdb_state_filter_restrict)
for x in self.all_raw_dbs if x.livefs])
self.insoluble = set()
self.vdb_preloaded = False
self._ensure_livefs_is_loaded = \
self._ensure_livefs_is_loaded_nonpreloaded
self.drop_cycles = drop_cycles
self.process_built_depends = process_built_depends
self._debugging = debug
if debug:
self._rec_add_atom = partial(self._stack_debugging_rec_add_atom,
self._rec_add_atom)
self._debugging_depth = 0
self._debugging_drop_cycles = False
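Two partial uses above are worth isolating: `_dprint` pre-binds the output handle so every debug call writes to the same stream, and the debug branch wraps `_rec_add_atom` with itself as a pre-bound argument, a lightweight decoration applied only when debugging. The first in isolation (dprint below is a hypothetical stand-in for the real helper):

import sys
from functools import partial  # stand-in for snakeoil.currying.partial

def dprint(handle, msg):  # hypothetical stand-in
    handle.write(msg + "\n")

debug = partial(dprint, sys.stderr)
debug("resolver: pruning cycle")  # always writes to the bound handle
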
Example 8: itermatch
def itermatch(self, restrict, **kwds):
kwds.setdefault("force", True)
o = kwds.get("pkg_klass_override")
if o is not None:
kwds["pkg_klass_override"] = partial(self.package_class, o)
else:
kwds["pkg_klass_override"] = self.package_class
return (x for x in self.raw_repo.itermatch(restrict, **kwds) if x.is_supported)
Example 9: __init__
def __init__(self, data, frozen=False, livefs=False):
self.installed = []
self.replaced = []
self.uninstalled = []
util.SimpleTree.__init__(self, data,
pkg_klass=partial(fake_pkg, self))
self.livefs = livefs
self.frozen = frozen
Example 10: generate_collapsed_restriction
def generate_collapsed_restriction(atoms, negate=False):
d = {}
for a in atoms:
k = a.key
if k not in d:
d[k] = [a]
else:
d[k].append(a)
return delegate(partial(_collapsed_restrict_match, d), negate=negate)
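The returned delegate calls `_collapsed_restrict_match` with the keyed dict already in place, so the per-package match only receives the package itself. A reduced sketch of baking a lookup table into a callback (names and data are hypothetical):

from functools import partial  # stand-in for snakeoil.currying.partial

def match_by_key(table, pkg_key):  # hypothetical reduction of _collapsed_restrict_match
    return pkg_key in table

table = {"dev-lang/python": ["atom-a", "atom-b"]}  # hypothetical data
matcher = partial(match_by_key, table)
print(matcher("dev-lang/python"))  # -> True
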
Example 11: generate_providers
def generate_providers(self):
rdep = AndRestriction(self.versioned_atom)
func = partial(virtual_ebuild, self._parent, self,
{"rdepends":rdep, "slot":"%s-%s" % (self.category, self.version)})
# re-enable license at some point.
#, "license":self.license})
return conditionals.DepSet.parse(
self.data.pop("PROVIDE", ""), virtual_ebuild, element_func=func,
operators={"":boolean.AndRestriction})
Example 12: parse_owns
def parse_owns(value):
"Value is a comma delimited set of paths to search contents for"
    # yes, it would be easier to do this without using parserestrict-
    # we defer to using it for the sake of a common parsing interface
    # exposed to the commandline, however.
    # the problem here is we don't want to trigger fs* module loadup
    # unless needed- hence this function.
parser = parserestrict.comma_separated_containment('contents',
values_kls=contents_module.contentsSet,
token_kls=partial(fs_module.fsBase, strict=False))
return parser(value)
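Here partial is passed as `token_kls` in place of the class itself: `partial(fs_module.fsBase, strict=False)` behaves like a constructor with a default keyword already applied. The pattern in isolation (Token below is a hypothetical stand-in for fsBase):

from functools import partial  # stand-in for snakeoil.currying.partial

class Token(object):  # hypothetical stand-in for fs_module.fsBase
    def __init__(self, location, strict=True):
        self.location = location
        self.strict = strict

make_token = partial(Token, strict=False)  # drop-in replacement for the class
tok = make_token("/usr/bin/tool")
print(tok.strict)  # -> False
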
Example 13: clone
def clone(self, unfreeze=False):
obj = self.__class__()
if self.frozen and not unfreeze:
obj._dict = self._dict
obj._global_settings = self._global_settings
return obj
obj._dict = defaultdict(partial(list, self._global_settings))
for key, values in self._dict.iteritems():
obj._dict[key].extend(values)
obj._global_settings = list(self._global_settings)
return obj
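`partial(list, self._global_settings)` makes a neat defaultdict factory: each missing key gets a fresh copy of the global settings rather than a shared list. A sketch of why that matters (assuming partial behaves like functools.partial; the data is hypothetical):

from collections import defaultdict
from functools import partial  # stand-in for snakeoil.currying.partial

defaults = ["strict", "userpriv"]  # hypothetical global settings
d = defaultdict(partial(list, defaults))
d["new-key"].append("sandbox")
print(d["new-key"])  # -> ['strict', 'userpriv', 'sandbox']
print(defaults)      # -> ['strict', 'userpriv'] (unchanged)
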
Example 14: __init__
def __init__(self, mode, tempdir, hooks, csets, preserves, observer,
offset=None, disable_plugins=False, parallelism=None):
if observer is None:
observer = observer_mod.repo_observer(observer_mod.null_output)
self.observer = observer
self.mode = mode
if tempdir is not None:
tempdir = normpath(tempdir) + '/'
self.tempdir = tempdir
if parallelism is None:
parallelism = get_proc_count()
self.parallelism = parallelism
self.hooks = ImmutableDict((x, []) for x in hooks)
self.preserve_csets = []
self.cset_sources = {}
# instantiate these separately so their values are preserved
self.preserved_csets = LazyValDict(
self.preserve_csets, self._get_cset_source)
for k, v in csets.iteritems():
if isinstance(v, basestring):
v = getattr(self, v, v)
if not callable(v):
raise TypeError(
"cset values must be either the string name of "
"existing methods, or callables (got %s)" % v)
if k in preserves:
self.add_preserved_cset(k, v)
else:
self.add_cset(k, v)
if offset is None:
offset = "/"
self.offset = offset
if not disable_plugins:
# merge in default triggers first.
for trigger in get_plugins('triggers'):
t = trigger()
t.register(self)
# merge in overrides
for hook, triggers in hooks.iteritems():
for trigger in triggers:
self.add_trigger(hook, trigger)
self.regenerate_csets()
for x in hooks:
setattr(self, x, currying.partial(self.execute_hook, x))
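The final loop attaches one bound callable per hook name, so `engine.pre_merge()` becomes shorthand for `engine.execute_hook('pre_merge')`. The idiom in isolation (hook names hypothetical):

from functools import partial  # stand-in for currying.partial

class Engine(object):
    def execute_hook(self, hook):
        print("running hook: %s" % hook)

engine = Engine()
for name in ("pre_merge", "post_merge"):  # hypothetical hook names
    setattr(engine, name, partial(engine.execute_hook, name))
engine.pre_merge()  # -> running hook: pre_merge
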
Example 15: test_subcommand
def test_subcommand(self):
class SubParser(commandline.OptionParser):
def check_values(self, values, args):
values, args = commandline.OptionParser.check_values(
self, values, args)
values.args = args
values.progname = self.prog
return values, ()
def submain(status, options, out, err, subs=('sub',)):
self.assertEqual(options.args, ['subarg'])
self.assertEqual(options.progname, 'fo %s' % (' '.join(subs),))
return status
self.assertMain(
0, '', '',
        {'sub': (SubParser, partial(submain, 0))}, ['sub', 'subarg'], script_name='fo')
self.assertMain(
1, '', '',
{'sub': {'sub2': (SubParser, partial(submain, 1, subs=('sub', 'sub2')))}},
['sub', 'sub2', 'subarg'], script_name='fo')
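The test pre-binds both a positional argument (the expected status) and a keyword argument (subs) to build two variants of the same callback. A simplified sketch of that mixed binding (submain reduced from the test above):

from functools import partial  # stand-in for snakeoil.currying.partial

def submain(status, options, subs=('sub',)):  # simplified from the test
    return status

main_ok = partial(submain, 0)
main_fail = partial(submain, 1, subs=('sub', 'sub2'))
print(main_ok(None))    # -> 0
print(main_fail(None))  # -> 1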