本文整理汇总了Python中twitter.common.collections.OrderedDict类的典型用法代码示例。如果您正苦于以下问题:Python OrderedDict类的具体用法?Python OrderedDict怎么用?Python OrderedDict使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了OrderedDict类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _calculate_classpath
def _calculate_classpath(self, targets):
jars = OrderedDict()
excludes = set()
# Support the ivy force concept when we sanely can for internal dep conflicts.
# TODO(John Sirois): Consider supporting / implementing the configured ivy revision picking
# strategy generally.
def add_jar(jar):
coordinate = (jar.org, jar.name)
existing = jars.get(coordinate)
jars[coordinate] = jar if not existing else (
self._resolve_conflict(existing=existing, proposed=jar)
)
def collect_jars(target):
if target.is_jvm or target.is_jar_library:
for jar in target.jar_dependencies:
if jar.rev:
add_jar(jar)
# Lift jvm target-level excludes up to the global excludes set
if target.is_jvm and target.payload.excludes:
excludes.update(target.payload.excludes)
for target in targets:
target.walk(collect_jars)
return jars.values(), excludes
示例2: execution_order
def execution_order(phases):
  """
  Yields goals in execution order for the given phases. Does not account for goals run
  multiple times due to grouping.
  """
  # goal -> OrderedSet of the goals it depends on, in discovery order.
  dependencies_by_goal = OrderedDict()

  def populate_dependencies(phases):
    # Depth-first walk: a goal's dependencies are registered before the goal
    # itself, so insertion order already approximates execution order.
    for phase in phases:
      for goal in phase.goals():
        if goal not in dependencies_by_goal:
          populate_dependencies(goal.dependencies)
          deps = OrderedSet()
          for phasedep in goal.dependencies:
            deps.update(phasedep.goals())
          dependencies_by_goal[goal] = deps
  populate_dependencies(phases)

  # Topological emit: repeatedly yield any goal with no remaining
  # dependencies and erase it from every other goal's dependency set.
  # NOTE(review): dependencies_by_goal is popped while iterating .items();
  # this relies on python 2 .items() returning a list snapshot — confirm
  # before porting to python 3 (wrap in list() there).
  while dependencies_by_goal:
    for goal, deps in dependencies_by_goal.items():
      if not deps:
        dependencies_by_goal.pop(goal)
        for _, deps in dependencies_by_goal.items():
          if goal in deps:
            deps.discard(goal)
        yield goal
示例3: write
def write(self, target, path, confs=None):
  """Render this target's dependency template and write it to ``path``.

  :param target: the internal target to describe.
  :param path: file path the rendered template is written to.
  :param confs: optional configurations forwarded to the template args.
  """
  def as_jar(internal_target):
    # Resolve an internal target to its published, versioned jar coordinates.
    jar, _, _, _ = self.get_db(internal_target).as_jar_with_version(internal_target)
    return jar

  # TODO(John Sirois): a dict is used here to de-dup codegen targets which have both the original
  # codegen target - say java_thrift_library - and the synthetic generated target (java_library)
  # Consider reworking codegen tasks to add removal of the original codegen targets when rewriting
  # the graph
  dependencies = OrderedDict()
  internal_codegen = {}
  for dep in target_internal_dependencies(target):
    jar = as_jar(dep)
    dependencies[(jar.org, jar.name)] = self.internaldep(jar, dep)
    if dep.is_codegen:
      internal_codegen[jar.name] = jar.name
  for jar in target.jar_dependencies:
    # Only jars pinned to a revision are real external dependencies.
    if jar.rev:
      dependencies[(jar.org, jar.name)] = self.jardep(jar)
  target_jar = self.internaldep(as_jar(target)).extend(dependencies=dependencies.values())
  template_kwargs = self.templateargs(target_jar, confs)
  with safe_open(path, 'w') as output:
    template = pkgutil.get_data(__name__, self.template_relpath)
    Generator(template, **template_kwargs).write(output)
示例4: compile
def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
  """Build the zinc argument list for compiling ``sources`` and invoke zinc."""
  # To pass options to scalac simply prefix with -S.
  args = ['-S' + x for x in self._scalac_args]

  def analysis_cache_full_path(analysis_cache_product):
    # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
    if len(analysis_cache_product) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    # python 2 idiom: grab the single (dir, files) mapping entry.
    analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
    if len(analysis_cache_files) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    return os.path.join(analysis_cache_dir, analysis_cache_files[0])

  # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
  analysis_map = \
    OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])
  if len(analysis_map) > 0:
    args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])
  args.extend([
    '-analysis-cache', analysis_cache,
    '-classpath', ':'.join(self._zinc_classpath + classpath),
    '-output-products', depfile,
    '-mirror-analysis',
    '-d', output_dir
  ])
  args.extend(sources)
  return self.run_zinc(args)
示例5: write
def write(self, target, path, confs=None, extra_confs=None):
  """Render this target's dependency template and write it to ``path``.

  Collects internal and external jar dependencies (de-duped by (org, name))
  plus the union of requested and jar-level configurations, then renders the
  descriptor template to ``path``.
  """
  # Keying on (org, name) de-dups codegen targets that appear both as the
  # original codegen target and as the synthetic generated target.
  dependencies = OrderedDict()
  internal_codegen = {}
  configurations = set(confs or [])

  for dep in target_internal_dependencies(target):
    versioned = self._as_versioned_jar(dep)
    dependencies[(versioned.org, versioned.name)] = self.internaldep(versioned, dep)
    if dep.is_codegen:
      internal_codegen[versioned.name] = versioned.name

  for external in target.jar_dependencies:
    # Only jars pinned to a revision are real external dependencies.
    if external.rev:
      dependencies[(external.org, external.name)] = self.jardep(external)
      configurations |= set(external._configurations)

  target_jar = self.internaldep(self._as_versioned_jar(target),
                                configurations=list(configurations))
  target_jar = target_jar.extend(dependencies=dependencies.values())

  template_kwargs = self.templateargs(target_jar, confs, extra_confs)
  with safe_open(path, 'w') as output:
    template = pkgutil.get_data(self.template_package_name, self.template_relpath)
    Generator(template, **template_kwargs).write(output)
示例6: identify_zinc_jars
def identify_zinc_jars(zinc_classpath):
  """Locate each of the well-known zinc jars on the given classpath.

  TODO: Make these mappings explicit instead of deriving them by jar name heuristics.
  """
  # Build the ordered name -> jar mapping directly from the identified pairs.
  return OrderedDict(ZincUtils.identify_jars(ZincUtils.ZINC_JAR_NAMES, zinc_classpath))
示例7: identify_zinc_jars
def identify_zinc_jars(zinc_classpath):
  """Map each known zinc jar name to its jar on the given classpath.

  TODO: When profiles migrate to regular pants jar() deps instead of ivy.xml files we can
  make these mappings explicit instead of deriving them by jar name heuristics.
  """
  jars_by_name = OrderedDict()
  jars_by_name.update(ZincUtils.identify_jars(ZincUtils.zinc_jar_names, zinc_classpath))
  return jars_by_name
示例8: identify_zinc_jars
def identify_zinc_jars(compiler_classpath, zinc_classpath):
  """Find the named jars in the compiler and zinc classpaths.

  TODO: When profiles migrate to regular pants jar() deps instead of ivy.xml files we can make these
  mappings explicit instead of deriving them by jar name heuristics.
  """
  identified = OrderedDict()
  # Compiler jars first, then zinc jars, preserving that ordering in the result.
  for names, path in ((ScalaCompile.compiler_jar_names, compiler_classpath),
                      (ScalaCompile.zinc_jar_names, zinc_classpath)):
    identified.update(ScalaCompile.identify_jars(names, path))
  return identified
示例9: compile
def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
  """Compile scala ``sources`` via zinc, emitting classes into ``output_dir``."""
  safe_mkdir(output_dir)
  compiler_classpath = nailgun_profile_classpath(self, self._compile_profile)
  compiler_args = []

  # TODO(John Sirois): separate compiler profile from runtime profile
  compiler_args.extend([
    # Support for outputting a dependencies file of source -> class
    '-Xplugin:%s' % self.get_depemitter_plugin(),
    '-P:depemitter:file:%s' % depfile
  ])
  compiler_args.extend(self._args)

  # To pass options to scalac simply prefix with -S.
  args = ['-S' + x for x in compiler_args]

  def analysis_cache_full_path(analysis_cache_product):
    # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
    if len(analysis_cache_product) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    # python 2 idiom: grab the single (dir, files) mapping entry.
    analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
    if len(analysis_cache_files) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    return os.path.join(analysis_cache_dir, analysis_cache_files[0])

  # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
  analysis_map = \
    OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])
  if len(analysis_map) > 0:
    args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])
  # Upstream output dirs must also be visible on the compile classpath below.
  upstream_classes_dirs = analysis_map.keys()

  zinc_classpath = nailgun_profile_classpath(self, self._zinc_profile)
  zinc_jars = ScalaCompile.identify_zinc_jars(compiler_classpath, zinc_classpath)
  for (name, jarpath) in zinc_jars.items():  # The zinc jar names are also the flag names.
    args.extend(['-%s' % name, jarpath])

  args.extend([
    '-analysis-cache', analysis_cache,
    '-log-level', self.context.options.log_level or 'info',
    '-classpath', ':'.join(zinc_classpath + classpath + upstream_classes_dirs),
    '-d', output_dir
  ])
  if not self._color:
    args.append('-no-color')
  args.extend(sources)

  self.context.log.debug('Executing: %s %s' % (self._main, ' '.join(args)))
  return self.runjava(self._main, classpath=zinc_classpath, args=args, jvmargs=self._jvm_args)
示例10: reset
def reset(self):
  """Clear out the state of the BuildGraph, in particular Target mappings and dependencies."""
  # Addresses already processed for closure (name-derived; used by injection code elsewhere).
  self._addresses_already_closed = set()
  # address -> Target, insertion-ordered so iteration is deterministic.
  self._target_by_address = OrderedDict()
  # address -> ordered set of addresses the target depends on.
  self._target_dependencies_by_address = defaultdict(OrderedSet)
  # address -> set of addresses that depend on the target.
  self._target_dependees_by_address = defaultdict(set)
  # derivative address -> address of the target it was derived from (e.g. via codegen).
  self._derived_from_by_derivative_address = {}
示例11: __init__
def __init__(self,
             checkpoint_root,
             verbose=True,
             task_killer=TaskKiller,
             executor_detector=ExecutorDetector,
             task_garbage_collector=TaskGarbageCollector,
             clock=time):
  """Initialize the GC executor daemon thread.

  :param checkpoint_root: root directory handed to the task garbage collector
    and stored for later use.
  :param verbose: NOTE(review): accepted but unused in this constructor —
    confirm whether a base class or subclass consumes it.
  :param task_killer: factory for task killers (injectable for testing).
  :param executor_detector: factory for executor detectors (injectable for testing).
  :param task_garbage_collector: factory taking ``root=`` (injectable for testing).
  :param clock: time source (defaults to the ``time`` module; injectable for testing).
  """
  ExecutorBase.__init__(self)
  ExceptionalThread.__init__(self)
  self.daemon = True
  self._stop_event = threading.Event()
  # mapping of task_id => (TaskInfo, AdjustRetainedTasks), in the order in
  # which they were received via a launchTask.
  self._gc_task_queue = OrderedDict()
  # cache the ExecutorDriver provided by the slave, so we can use it out
  # of band from slave-initiated callbacks. This should be supplied by
  # ExecutorBase.registered() when the executor first registers with the
  # slave.
  self._driver = None
  self._slave_id = None  # cache the slave ID provided by the slave
  self._task_id = None  # the task_id currently being executed by the ThermosGCExecutor, if any
  self._start_time = None  # the start time of a task currently being executed, if any
  self._detector = executor_detector()
  self._collector = task_garbage_collector(root=checkpoint_root)
  self._clock = clock
  self._task_killer = task_killer
  self._checkpoint_root = checkpoint_root
  # Counter of tasks dropped from the GC queue, exported via metrics.
  self._dropped_tasks = AtomicGauge('dropped_tasks')
  self.metrics.register(self._dropped_tasks)
示例12: __init__
def __init__(self, older_than=120, aggregation_depth=0):
  """Track per-path request datapoints keyed by timestamp.

  :param older_than: datapoints older than this are dropped.
  :param aggregation_depth: when > 0, paths are aggregated up to that depth.
  """
  self._older_than = older_than
  self._aggregation_depth = aggregation_depth
  # timestamp -> datapoints, kept in insertion order (per the name; pruning
  # of stale entries lives elsewhere in this class).
  self._requests_by_timestamp = OrderedDict()
  # Serializes access to the shared mapping above.
  self._lock = Lock()
  super(PerPathDatapoints, self).__init__()
示例13: test_default_maybe_list
def test_default_maybe_list():
  """Exercise maybe_list coercion of scalars and iterables of strings."""
  HELLO_WORLD = ['hello', 'world']
  # A bare string is wrapped, not exploded into characters.
  assert maybe_list('hello') == ['hello']
  assert maybe_list(('hello', 'world')) == HELLO_WORLD
  assert maybe_list(['hello', 'world']) == HELLO_WORLD
  # OrderedSet de-dups while preserving first-seen order.
  assert maybe_list(OrderedSet(['hello', 'world', 'hello'])) == HELLO_WORLD
  # Generators are materialized.
  assert maybe_list(s for s in ('hello', 'world')) == HELLO_WORLD
  # Mappings are iterated by key, in insertion order.
  od = OrderedDict(hello=1)
  od.update(world=2)
  assert maybe_list(od) == HELLO_WORLD
  # Empty inputs of any shape normalize to the empty list.
  assert maybe_list([]) == []
  assert maybe_list(()) == []
  assert maybe_list(set()) == []
  # Non-string values are rejected under the default expected_type.
  with pytest.raises(ValueError):
    maybe_list(123)
  with pytest.raises(ValueError):
    maybe_list(['hello', 123])
  # The order of types inside expected_type is irrelevant.
  assert maybe_list(['hello', 123], expected_type=(str, int)) == ['hello', 123]
  assert maybe_list(['hello', 123], expected_type=(int, str)) == ['hello', 123]
示例14: compile
def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
  """Compile ``sources`` through zinc, mapping in upstream analysis caches."""
  # To pass options to scalac simply prefix with -S.
  args = ['-S' + x for x in self._args]

  def analysis_cache_full_path(analysis_cache_product):
    # We expect the argument to be { analysis_cache_dir, [ analysis_cache_file ]}.
    if len(analysis_cache_product) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    # python 2 idiom: grab the single (dir, files) mapping entry.
    analysis_cache_dir, analysis_cache_files = analysis_cache_product.iteritems().next()
    if len(analysis_cache_files) != 1:
      raise TaskError('There can only be one analysis cache file per output directory')
    return os.path.join(analysis_cache_dir, analysis_cache_files[0])

  # Strings of <output dir>:<full path to analysis cache file for the classes in that dir>.
  analysis_map = \
    OrderedDict([ (k, analysis_cache_full_path(v)) for k, v in upstream_analysis_caches.itermappings() ])
  args.extend(self._zinc_jar_args)
  if len(analysis_map) > 0:
    args.extend([ '-analysis-map', ','.join(['%s:%s' % kv for kv in analysis_map.items()]) ])
  args.extend([
    '-analysis-cache', analysis_cache,
    '-log-level', self.context.options.log_level or 'info',
    '-classpath', ':'.join(self._zinc_classpath + classpath),
    '-output-products', depfile,
    '-mirror-analysis',
    '-d', output_dir
  ])
  if not self._color:
    args.append('-no-color')
  args.extend(sources)

  self.context.log.debug('Executing: %s %s' % (self._main, ' '.join(args)))
  return self.runjava(self._main, classpath=self._zinc_classpath, args=args, jvmargs=self._jvm_args)
示例15: BuildGraph
class BuildGraph(object):
"""A directed acyclic graph of Targets and dependencies. Not necessarily connected.
"""
class DuplicateAddressError(AddressLookupError):
  """Raised when the same address appears multiple times in a dependency list."""
class TransitiveLookupError(AddressLookupError):
  """Used to append the current node to the error message from an AddressLookupError."""
def __init__(self, address_mapper, run_tracker=None):
  """Create an empty BuildGraph.

  :param address_mapper: stored for later address lookups.
  :param run_tracker: optional tracker, exposed as a public attribute.
  """
  self._address_mapper = address_mapper
  self.run_tracker = run_tracker
  self.reset()  # establish the empty mapping state
def reset(self):
  """Clear out the state of the BuildGraph, in particular Target mappings and dependencies."""
  # Addresses already processed for closure (name-derived; used by injection code elsewhere).
  self._addresses_already_closed = set()
  # address -> Target, insertion-ordered so iteration is deterministic.
  self._target_by_address = OrderedDict()
  # address -> ordered set of addresses the target depends on.
  self._target_dependencies_by_address = defaultdict(OrderedSet)
  # address -> set of addresses that depend on the target.
  self._target_dependees_by_address = defaultdict(set)
  # derivative address -> address of the target it was derived from (e.g. via codegen).
  self._derived_from_by_derivative_address = {}
def contains_address(self, address):
  """Returns True iff a Target has been injected at ``address``."""
  return address in self._target_by_address
def get_target_from_spec(self, spec, relative_to=''):
  """Converts `spec` into a SyntheticAddress and returns the result of `get_target`.

  :param spec: a target spec string, parsed relative to ``relative_to``.
  """
  return self.get_target(SyntheticAddress.parse(spec, relative_to=relative_to))
def get_target(self, address):
  """Returns the Target at `address` if it has been injected into the BuildGraph, otherwise None.
  """
  # .get with an explicit None default: an absent address is not an error here.
  return self._target_by_address.get(address, None)
def dependencies_of(self, address):
  """Returns the dependencies of the Target at `address`.

  This method asserts that the address given is actually in the BuildGraph.
  """
  # NOTE(review): `assert`-based validation disappears under `python -O`;
  # confirm callers do not rely on this check in optimized runs.
  assert address in self._target_by_address, (
    'Cannot retrieve dependencies of {address} because it is not in the BuildGraph.'
    .format(address=address)
  )
  return self._target_dependencies_by_address[address]
def dependents_of(self, address):
  """Returns the Targets which depend on the target at `address`.

  This method asserts that the address given is actually in the BuildGraph.
  """
  # NOTE(review): `assert`-based validation disappears under `python -O`;
  # confirm callers do not rely on this check in optimized runs.
  assert address in self._target_by_address, (
    'Cannot retrieve dependents of {address} because it is not in the BuildGraph.'
    .format(address=address)
  )
  return self._target_dependees_by_address[address]
def get_derived_from(self, address):
  """Get the target the specified target was derived from.

  If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
  ancestry. If a Target is not derived, default to returning itself.
  """
  # A non-derived address maps to itself, so the lookup always resolves.
  origin_address = self._derived_from_by_derivative_address.get(address, address)
  return self.get_target(origin_address)
def get_concrete_derived_from(self, address):
  """Get the concrete target the specified target was (directly or indirectly) derived from.

  The returned target is guaranteed to not have been derived from any other target.
  """
  # Follow derived-from links until reaching a fixed point: an address that
  # maps to itself is not derived from anything else.
  current_address = address
  next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
  while next_address != current_address:
    current_address = next_address
    next_address = self._derived_from_by_derivative_address.get(current_address, current_address)
  return self.get_target(current_address)
def inject_target(self, target, dependencies=None, derived_from=None):
"""Injects a fully realized Target into the BuildGraph.
:param Target target: The Target to inject.
:param list<Address> dependencies: The Target addresses that `target` depends on.
:param Target derived_from: The Target that `target` was derived from, usually as a result
of codegen.
"""
dependencies = dependencies or frozenset()
address = target.address
if address in self._target_by_address:
raise ValueError('A Target {existing_target} already exists in the BuildGraph at address'
' {address}. Failed to insert {target}.'
.format(existing_target=self._target_by_address[address],
address=address,
target=target))
if derived_from:
if not self.contains_address(derived_from.address):
raise ValueError('Attempted to inject synthetic {target} derived from {derived_from}'
#.........这里部分代码省略.........