

Python orderedset.OrderedSet Class Code Examples

This article collects typical usage examples of the Python class twitter.common.collections.orderedset.OrderedSet. If you have been wondering what the OrderedSet class does, how to use it, or what real-world usage looks like, the curated examples below should help.


The following 15 code examples of the OrderedSet class are shown below, sorted by popularity.
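Before diving in, here is a minimal sketch of the OrderedSet operations these examples rely on (construction from an iterable, update, discard, and pop(last=False)). It assumes the twitter.common.collections distribution is installed; OrderedSet behaves like a set that also preserves insertion order.

from twitter.common.collections.orderedset import OrderedSet

s = OrderedSet(['b', 'a', 'b', 'c'])  # duplicates collapse; first-insertion order is kept
print(list(s))                        # ['b', 'a', 'c']

s.update(['d', 'a'])                  # bulk insert; existing members keep their position
s.discard('a')                        # remove if present; no error when absent
print(list(s))                        # ['b', 'c', 'd']

print(s.pop(last=False))              # prints 'b'; pops from the front (FIFO), as in Examples 8 and 10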

Example 1: do_check_artifact_cache

  def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
    """Checks the artifact cache for the specified list of VersionedTargetSets.

    Returns a pair (cached, uncached) of VersionedTargets that were
    satisfied/unsatisfied from the cache.
    """
    if not vts:
      return [], []

    cached_vts = []
    uncached_vts = OrderedSet(vts)

    with self.context.new_workunit(name='check', labels=[WorkUnit.MULTITOOL]) as parent:
      res = self.context.submit_foreground_work_and_wait(
        Work(lambda vt: bool(self.get_artifact_cache().use_cached_files(vt.cache_key)),
             [(vt, ) for vt in vts], 'check'), workunit_parent=parent)
    for vt, was_in_cache in zip(vts, res):
      if was_in_cache:
        cached_vts.append(vt)
        uncached_vts.discard(vt)
    # Note that while the input vts may represent multiple targets (for tasks that override
    # check_artifact_cache_for), the ones we return must represent single targets.
    def flatten(vts):
      return list(itertools.chain.from_iterable([vt.versioned_targets for vt in vts]))
    all_cached_vts, all_uncached_vts = flatten(cached_vts), flatten(uncached_vts)
    if post_process_cached_vts:
      post_process_cached_vts(all_cached_vts)
    for vt in all_cached_vts:
      vt.update()
    return all_cached_vts, all_uncached_vts
Author: theyelllowdart, Project: commons, Lines: 30, Source: task.py
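The pattern above (seed an OrderedSet with every input, then discard each cache hit) produces the misses in their original order, without a linear-time list removal per hit. Below is a self-contained sketch of the same partitioning; in_cache is a hypothetical predicate standing in for use_cached_files:

from twitter.common.collections.orderedset import OrderedSet

def partition(items, in_cache):
  """Split items into (hits, misses), preserving input order in both."""
  hits = []
  misses = OrderedSet(items)
  for item in items:
    if in_cache(item):
      hits.append(item)
      misses.discard(item)
  return hits, list(misses)

print(partition(range(5), lambda n: n % 2 == 0))  # ([0, 2, 4], [1, 3])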

Example 2: do_check_artifact_cache

    def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
        """Checks the artifact cache for the specified list of VersionedTargetSets.

        Returns a pair (cached, uncached) of VersionedTargets that were
        satisfied/unsatisfied from the cache.
        """
        if not vts:
            return [], []

        cached_vts = []
        uncached_vts = OrderedSet(vts)

        read_cache = self._cache_factory.get_read_cache()
        items = [(read_cache, vt.cache_key) for vt in vts]

        res = self.context.subproc_map(call_use_cached_files, items)

        for vt, was_in_cache in zip(vts, res):
            if was_in_cache:
                cached_vts.append(vt)
                uncached_vts.discard(vt)
            elif isinstance(was_in_cache, UnreadableArtifact):
                self._cache_key_errors.update(was_in_cache.key)

        # Note that while the input vts may represent multiple targets (for tasks that override
        # check_artifact_cache_for), the ones we return must represent single targets.
        def flatten(vts):
            return list(itertools.chain.from_iterable([vt.versioned_targets for vt in vts]))

        all_cached_vts, all_uncached_vts = flatten(cached_vts), flatten(uncached_vts)
        if post_process_cached_vts:
            post_process_cached_vts(all_cached_vts)
        for vt in all_cached_vts:
            vt.update()
        return all_cached_vts, all_uncached_vts
Author: qiaohaijun, Project: pants, Lines: 35, Source: task.py

Example 3: create

  def create(cls, ftype, relative_to, files=None, globs=None, rglobs=None, zglobs=None):
    """Given various file patterns create a PathGlobs object (without using filesystem operations).

    TODO: This currently sortof-executes parsing via 'to_filespec'. Should maybe push that out to
    callers to make them deal with errors earlier.

    :param relative_to: The path that all patterns are relative to (which will itself be relative
                        to the buildroot).
    :param ftype: A Stat subclass indicating which Stat type will be matched.
    :param files: A list of relative file paths to include.
    :type files: list of string.
    :param string globs: A relative glob pattern of files to include.
    :param string rglobs: A relative recursive glob pattern of files to include.
    :param string zglobs: A relative zsh-style glob pattern of files to include.
    :rtype: :class:`PathGlobs`
    """
    relative_to = normpath(relative_to)
    filespecs = OrderedSet()
    for specs, pattern_cls in ((files, Globs),
                               (globs, Globs),
                               (rglobs, RGlobs),
                               (zglobs, ZGlobs)):
      if not specs:
        continue
      res = pattern_cls.to_filespec(specs)
      excludes = res.get('excludes')
      if excludes:
        raise ValueError('Excludes not supported for PathGlobs. Got: {}'.format(excludes))
      new_specs = res.get('globs', None)
      if new_specs:
        filespecs.update(new_specs)
    return cls.create_from_specs(ftype, relative_to, filespecs)
Author: aaronmitchell, Project: pants, Lines: 33, Source: fs.py

Example 4: create

  def create(cls, relative_to, files=None, globs=None, rglobs=None, zglobs=None):
    """Given various file patterns create a PathGlobs object (without using filesystem operations).

    :param relative_to: The path that all patterns are relative to (which will itself be relative
      to the buildroot).
    :param files: A list of relative file paths to include.
    :type files: list of string.
    :param string globs: A relative glob pattern of files to include.
    :param string rglobs: A relative recursive glob pattern of files to include.
    :param string zglobs: A relative zsh-style glob pattern of files to include.
    :rtype: :class:`PathGlobs`
    """
    filespecs = OrderedSet()
    for specs, pattern_cls in ((files, Globs),
                               (globs, Globs),
                               (rglobs, RGlobs),
                               (zglobs, ZGlobs)):
      if not specs:
        continue
      res = pattern_cls.to_filespec(specs)
      exclude = res.get('exclude')
      if exclude:
        raise ValueError('Excludes not supported for PathGlobs. Got: {}'.format(exclude))
      new_specs = res.get('globs', None)
      if new_specs:
        filespecs.update(new_specs)
    return cls.create_from_specs(relative_to, filespecs)
Author: ericzundel, Project: pants, Lines: 28, Source: fs.py

Example 5: check_artifact_cache

  def check_artifact_cache(self, vts):
    """Checks the artifact cache for the specified VersionedTargetSets.

    Returns a pair (cached, uncached) of VersionedTargetSets; the cached ones don't require building.
    """
    if not vts:
      return [], []

    cached_vts = []
    uncached_vts = OrderedSet(vts)
    if self._artifact_cache and self.context.options.read_from_artifact_cache:
      pool = ThreadPool(processes=6)
      res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
                     vts, chunksize=1)
      pool.close()
      pool.join()
      for vt, was_in_cache in zip(vts, res):
        if was_in_cache:
          cached_vts.append(vt)
          uncached_vts.discard(vt)
          self.context.log.info('Using cached artifacts for %s' % vt.targets)
          vt.update()
        else:
          self.context.log.info('No cached artifacts for %s' % vt.targets)
    return cached_vts, list(uncached_vts)
Author: alaattinturyan, Project: commons, Lines: 25, Source: __init__.py

Example 6: reduce

  def reduce(self, execution_request):
    """The main reduction loop."""
    # 1. Whenever we don't have enough work to saturate the pool, request more.
    # 2. Whenever the pool is not saturated, submit currently pending work.

    # Step instances which have not been submitted yet.
    pending_submission = OrderedSet()
    in_flight = dict()  # Dict from step id to a Promise for Steps that have been submitted.

    submit_until = functools.partial(self._submit_until, pending_submission, in_flight)
    await_one = functools.partial(self._await_one, in_flight)

    for step_batch in self._scheduler.schedule(execution_request):
      if not step_batch:
        # A batch should only be empty if all dependency work is currently blocked/running.
        if not in_flight and not pending_submission:
          raise StepBatchException(
            'Scheduler provided an empty batch while no work is in progress!')
      else:
        # Submit and wait for work for as long as we're able to keep the pool saturated.
        pending_submission.update(step_batch)
        while submit_until(self._pool_size) > 0:
          await_one()
      # Await at least one entry per scheduling loop.
      submit_until(0)
      if in_flight:
        await_one()

    # Consume all steps.
    while pending_submission or in_flight:
      submit_until(self._pool_size)
      await_one()
Author: RobinTec, Project: pants, Lines: 32, Source: engine.py
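Note the role OrderedSet plays here: pending_submission deduplicates steps that the scheduler re-issues while keeping them in FIFO submission order, which the later variants (Examples 8 and 10) drain with pop(last=False). A toy illustration of that queue behavior:

from twitter.common.collections.orderedset import OrderedSet

pending = OrderedSet()
pending.update(['step-1', 'step-2'])
pending.update(['step-2', 'step-3'])  # re-issuing 'step-2' is a no-op

while pending:
  print(pending.pop(last=False))      # step-1, step-2, step-3: FIFO order, no duplicates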

Example 7: configure_target

        def configure_target(target):
            if target not in analyzed_targets:
                analyzed_targets.add(target)
                self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))

                # Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
                if isinstance(target, ScalaLibrary):
                    for java_source in target.java_sources:
                        configure_target(java_source)

                # Resources are already in the target set
                if target.has_resources:
                    resources_by_basedir = defaultdict(set)
                    for resources in target.resources:
                        analyzed_targets.add(resources)
                        resources_by_basedir[resources.target_base].update(relative_sources(resources))
                    for basedir, resources in resources_by_basedir.items():
                        self.resource_extensions.update(Project.extract_resource_extensions(resources))
                        configure_source_sets(basedir, resources, is_test=target.is_test, resources_only=True)
                if target.has_sources():
                    test = target.is_test
                    self.has_tests = self.has_tests or test
                    base = target.target_base
                    configure_source_sets(
                        base, relative_sources(target), is_test=test, resources_only=isinstance(target, Resources)
                    )

                # TODO(Garrett Malmquist): This is dead code, and should be redone/reintegrated.
                # Other BUILD files may specify sources in the same directory as this target. Those BUILD
                # files might be in parent directories (globs('a/b/*.java')) or even child directories if
                # this target globs children as well.  Gather all these candidate BUILD files to test for
                # sources they own that live in the directories this target's sources live in.
                target_dirset = find_source_basedirs(target)
                if not isinstance(target.address, BuildFileAddress):
                    return []  # Siblings only make sense for BUILD files.
                candidates = OrderedSet()
                build_file = target.address.build_file
                dir_relpath = os.path.dirname(build_file.relpath)
                for descendant in BuildFile.scan_build_files(
                    build_file.project_tree,
                    dir_relpath,
                    spec_excludes=self.spec_excludes,
                    build_ignore_patterns=self.build_ignore_patterns,
                ):
                    candidates.update(self.target_util.get_all_addresses(descendant))
                if not self._is_root_relpath(dir_relpath):
                    ancestors = self._collect_ancestor_build_files(
                        build_file.project_tree, os.path.dirname(dir_relpath), self.build_ignore_patterns
                    )
                    for ancestor in ancestors:
                        candidates.update(self.target_util.get_all_addresses(ancestor))

                def is_sibling(target):
                    return source_target(target) and target_dirset.intersection(find_source_basedirs(target))

                return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])
Author: caveness, Project: pants, Lines: 56, Source: ide_gen.py

Example 8: reduce

  def reduce(self, execution_request):
    # Step instances which have not been submitted yet.
    # TODO: Scheduler now only sends work once, so a deque should be fine here.
    pending_submission = OrderedSet()
    # Dict from step id to a Promise for Steps that have been submitted.
    in_flight = dict()

    def submit_until(n):
      """Submit pending while there's capacity, and more than `n` items pending_submission."""
      to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
      for _ in range(to_submit):
        step, promise = pending_submission.pop(last=False)
        if step.step_id in in_flight:
          raise Exception('{} is already in_flight!'.format(step))
        in_flight[step.step_id] = promise
        self._submit(step)
      return to_submit

    def await_one():
      """Await one completed step, and remove it from in_flight."""
      if not in_flight:
        raise Exception('Awaited an empty pool!')
      step_id, result = self._pool.await_one_result()
      if isinstance(result, Exception):
        raise result
      if step_id not in in_flight:
        raise Exception('Received unexpected work from the Executor: {} vs {}'.format(step_id, in_flight.keys()))
      in_flight.pop(step_id).success(result)

    # The main reduction loop:
    # 1. Whenever we don't have enough work to saturate the pool, request more.
    # 2. Whenever the pool is not saturated, submit currently pending work.
    for step_batch in self._scheduler.schedule(execution_request):
      if not step_batch:
        # A batch should only be empty if all dependency work is currently blocked/running.
        if not in_flight and not pending_submission:
          raise Exception('Scheduler provided an empty batch while no work is in progress!')
      else:
        # Submit and wait for work for as long as we're able to keep the pool saturated.
        pending_submission.update(step_batch)
        while submit_until(self._pool_size) > 0:
          await_one()
      # Await at least one entry per scheduling loop.
      submit_until(0)
      if in_flight:
        await_one()

    # Consume all steps.
    while pending_submission or in_flight:
      submit_until(0)
      await_one()
Author: jsoref, Project: pants, Lines: 51, Source: engine.py

Example 9: create

    def create(cls,
               relative_to,
               files=None,
               globs=None,
               rglobs=None,
               zglobs=None):
        """Given various file patterns create a PathGlobs object (without using filesystem operations).

    TODO: This currently sortof-executes parsing via 'to_filespec'. Should maybe push that out to
    callers to make them deal with errors earlier.

    :param relative_to: The path that all patterns are relative to (which will itself be relative
                        to the buildroot).
    :param files: A list of relative file paths to include.
    :type files: list of string.
    :param string globs: A relative glob pattern of files to include.
    :param string rglobs: A relative recursive glob pattern of files to include.
    :param string zglobs: A relative zsh-style glob pattern of files to include.
    :param zglobs: A relative zsh-style glob pattern of files to include.
    :rtype: :class:`PathGlobs`
    """
        filespecs = OrderedSet()
        for specs, pattern_cls in ((files, Globs), (globs, Globs),
                                   (rglobs, RGlobs), (zglobs, ZGlobs)):
            if not specs:
                continue
            res = pattern_cls.to_filespec(specs)
            excludes = res.get('excludes')
            if excludes:
                raise ValueError(
                    'Excludes not supported for PathGlobs. Got: {}'.format(
                        excludes))
            new_specs = res.get('globs', None)
            if new_specs:
                filespecs.update(new_specs)

        path_globs = []
        for filespec in filespecs:
            # TODO: These will be implemented as part of finishing:
            #   https://github.com/pantsbuild/pants/issues/2946
            if cls._RECURSIVE in filespec:
                raise ValueError('TODO: Unsupported: {}'.format(filespec))
            elif cls._SINGLE in filespec:
                raise ValueError('TODO: Unsupported: {}'.format(filespec))
            elif '*' in filespec:
                raise ValueError('TODO: Unsupported: {}'.format(filespec))
            else:
                # A literal path.
                path_globs.append(PathGlob(relative_to, filespec))
        return cls(tuple(path_globs))
Author: ahamilton55, Project: pants, Lines: 50, Source: fs.py

Example 10: reduce

  def reduce(self, build_request, fail_slow=False):
    executor = self.Executor(self._pool, self._pool_size, fail_slow=fail_slow, debug=self._debug)

    # Steps move from `pending_submission` to `in_flight`.
    pending_submission = OrderedSet()
    in_flight = dict()

    def submit_until(n):
      """Submit pending while there's capacity, and more than `n` items pending_submission."""
      to_submit = min(len(pending_submission) - n, self._pool_size - len(in_flight))
      for _ in range(to_submit):
        step, promise = pending_submission.pop(last=False)
        if step in in_flight:
          raise Exception('{} is already in_flight!'.format(step))
        in_flight[step] = promise
        executor.submit(step)
      return to_submit

    def await_one():
      """Await one completed step, and remove it from in_flight."""
      if not in_flight:
        raise Exception('Awaited an empty pool!')
      step, result = executor.await_one_result()
      if step not in in_flight:
        raise Exception('Received unexpected work from the Executor: {} vs {}'.format(step, in_flight.keys()))
      in_flight.pop(step).success(result)

    # The main reduction loop:
    # 1. Whenever we don't have enough work to saturate the pool, request more.
    # 2. Whenever the pool is not saturated, submit currently pending work.
    for step_batch in self._scheduler.schedule(build_request):
      if not step_batch:
        # A batch should only be empty if all dependency work is currently blocked/running.
        if not in_flight and not pending_submission:
          raise Exception('Scheduler provided an empty batch while no work is in progress!')
      else:
        # Submit and wait for work for as long as we're able to keep the pool saturated.
        pending_submission.update(step_batch)
        while submit_until(self._pool_size) > 0:
          await_one()
      # Await at least one entry per scheduling loop.
      submit_until(0)
      if in_flight:
        await_one()

    # Consume all steps.
    while pending_submission or in_flight:
      submit_until(0)
      await_one()
Author: caveness, Project: pants, Lines: 49, Source: engine.py

Example 11: __init__

    def __init__(self, failed_to_produce):
      """
      :param failed_to_produce: A mapping of failed promises to the `FailedToProduce` product
                                representing the failure.
      :type failed_to_produce: dict of (:class:`pants.engine.exp.scheduler.Promise`,
                                        :class:`FailedToProduce`)
      """
      failed_targets = OrderedSet()
      for ftp in failed_to_produce.values():
        for f in ftp.walk():
          if isinstance(f.error, TaskError):
            failed_targets.update(f.error.failed_targets)

      super(Engine.PartialFailureError, self).__init__(exit_code=1,
                                                       failed_targets=list(failed_targets))
      self._failed_to_produce = failed_to_produce
Author: dturner-tw, Project: pants, Lines: 16, Source: engine.py

Example 12: check_artifact_cache

  def check_artifact_cache(self, vts):
    """Checks the artifact cache for the specified VersionedTargetSets.

    Returns a pair (cached, uncached) of VersionedTargetSets; the cached ones don't require building.
    """
    if not vts:
      return [], []

    cached_vts = []
    uncached_vts = OrderedSet(vts)

    with self.context.new_workunit('check'):
      pool = ThreadPool(processes=6)
      res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
                     vts, chunksize=1)
      pool.close()
      pool.join()
      for vt, was_in_cache in zip(vts, res):
        if was_in_cache:
          cached_vts.append(vt)
          uncached_vts.discard(vt)
          vt.update()
    return cached_vts, list(uncached_vts)
Author: BabyDuncan, Project: commons, Lines: 23, Source: __init__.py

Example 13: configure_jvm

  def configure_jvm(self, extra_source_paths, extra_test_paths):
    """
      Configures this project's source sets returning the full set of targets the project is
      comprised of.  The full set can be larger than the initial set of targets when any of the
      initial targets only has partial ownership of its source set's directories.
    """

    # TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
    # construction of source sets and excludes ... and add a test!

    analyzed = OrderedSet()
    targeted = set()

    def relative_sources(target):
      sources = target.payload.sources.relative_to_buildroot()
      return [os.path.relpath(source, target.target_base) for source in sources]

    def source_target(target):
      result = ((self.transitive or target in self.targets) and
              target.has_sources() and
              (not (self.skip_java and is_java(target)) and
               not (self.skip_scala and is_scala(target))))
      return result

    def configure_source_sets(relative_base, sources, is_test):
      absolute_base = os.path.join(self.root_dir, relative_base)
      paths = set([os.path.dirname(source) for source in sources])
      for path in paths:
        absolute_path = os.path.join(absolute_base, path)
        # Note, this can add duplicate source paths to self.sources().  We'll de-dup them later,
        # because we want to prefer test paths.
        targeted.add(absolute_path)
        self.sources.append(SourceSet(self.root_dir, relative_base, path, is_test))

    def find_source_basedirs(target):
      dirs = set()
      if source_target(target):
        absolute_base = os.path.join(self.root_dir, target.target_base)
        dirs.update([os.path.join(absolute_base, os.path.dirname(source))
                      for source in relative_sources(target)])
      return dirs

    def configure_target(target):
      if target not in analyzed:
        analyzed.add(target)
        self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))

        # Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
        if isinstance(target, ScalaLibrary):
          for java_source in target.java_sources:
            configure_target(java_source)

        if target.has_resources:
          resources_by_basedir = defaultdict(set)
          for resources in target.resources:
            resources_by_basedir[target.target_base].update(relative_sources(resources))
          for basedir, resources in resources_by_basedir.items():
            self.resource_extensions.update(Project.extract_resource_extensions(resources))
            configure_source_sets(basedir, resources, is_test=target.is_test)

        if target.has_sources():
          test = target.is_test
          self.has_tests = self.has_tests or test
          base = target.target_base
          configure_source_sets(base, relative_sources(target), is_test=test)

        # Other BUILD files may specify sources in the same directory as this target. Those BUILD
        # files might be in parent directories (globs('a/b/*.java')) or even child directories if
        # this target globs children as well.  Gather all these candidate BUILD files to test for
        # sources they own that live in the directories this target's sources live in.
        target_dirset = find_source_basedirs(target)
        if target.address.is_synthetic:
          return [] # Siblings don't make sense for synthetic addresses.
        candidates = self.target_util.get_all_addresses(target.address.build_file)
        for ancestor in target.address.build_file.ancestors():
          candidates.update(self.target_util.get_all_addresses(ancestor))
        for sibling in target.address.build_file.siblings():
          candidates.update(self.target_util.get_all_addresses(sibling))
        for descendant in target.address.build_file.descendants():
          candidates.update(self.target_util.get_all_addresses(descendant))
        def is_sibling(target):
          return source_target(target) and target_dirset.intersection(find_source_basedirs(target))

        return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])

    for target in self.targets:
      target.walk(configure_target, predicate=source_target)

    def full_path(source_set):
      return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)

    def dedup_sources(source_set_list):
      """Sometimes two targets with the same path are added to the source set. One is a target where
       is_test evaluates to True and the other were it evaluates to False.  When this happens,
       make sure we prefer the SourceSet with is_test set to True.
      """
      deduped_sources = set(filter(lambda source_set: source_set.is_test, source_set_list))
      for source_set in source_set_list:
        if not source_set.is_test and source_set not in deduped_sources:
          deduped_sources.add(source_set)
#......... remainder of this example omitted .........
Author: jcoveney, Project: pants, Lines: 101, Source: ide_gen.py

Example 14: configure_jvm

  def configure_jvm(self, scala_compiler_profile, extra_source_paths, extra_test_paths):
    """
      Configures this project's source sets returning the full set of targets the project is
      comprised of.  The full set can be larger than the initial set of targets when any of the
      initial targets only has partial ownership of its source set's directories.
    """

    # TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
    # construction of source sets and excludes ... and add a test!

    analyzed = OrderedSet()
    targeted = set()

    def source_target(target):
      return (self.transitive or target in self.targets) and has_sources(target) \
          and (not is_codegen(target)
               and not (self.skip_java and is_java(target))
               and not (self.skip_scala and is_scala(target)))

    def configure_source_sets(relative_base, sources, is_test):
      absolute_base = os.path.join(self.root_dir, relative_base)
      paths = set([ os.path.dirname(source) for source in sources])
      for path in paths:
        absolute_path = os.path.join(absolute_base, path)
        if absolute_path not in targeted:
          targeted.add(absolute_path)
          self.sources.append(SourceSet(self.root_dir, relative_base, path, is_test))

    def find_source_basedirs(target):
      dirs = set()
      if source_target(target):
        absolute_base = os.path.join(self.root_dir, target.target_base)
        dirs.update([ os.path.join(absolute_base, os.path.dirname(source))
                      for source in target.sources ])
      return dirs

    def configure_target(target):
      if target not in analyzed:
        analyzed.add(target)

        self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))

        if has_resources(target):
          resources_by_basedir = defaultdict(set)
          for resources in target.resources:
            resources_by_basedir[resources.target_base].update(resources.sources)
          for basedir, resources in resources_by_basedir.items():
            self.resource_extensions.update(Project.extract_resource_extensions(resources))
            configure_source_sets(basedir, resources, is_test=False)

        if target.sources:
          test = is_test(target)
          self.has_tests = self.has_tests or test
          configure_source_sets(target.target_base, target.sources, is_test = test)

        # Other BUILD files may specify sources in the same directory as this target.  Those BUILD
        # files might be in parent directories (globs('a/b/*.java')) or even child directories if
        # this target globs children as well.  Gather all these candidate BUILD files to test for
        # sources they own that live in the directories this target's sources live in.
        target_dirset = find_source_basedirs(target)
        candidates = Target.get_all_addresses(target.address.buildfile)
        for ancestor in target.address.buildfile.ancestors():
          candidates.update(Target.get_all_addresses(ancestor))
        for sibling in target.address.buildfile.siblings():
          candidates.update(Target.get_all_addresses(sibling))
        for descendant in target.address.buildfile.descendants():
          candidates.update(Target.get_all_addresses(descendant))

        def is_sibling(target):
          return source_target(target) and target_dirset.intersection(find_source_basedirs(target))

        return filter(is_sibling, [ Target.get(a) for a in candidates if a != target.address ])

    for target in self.targets:
      target.walk(configure_target, predicate = source_target)

    self.configure_profiles(scala_compiler_profile)

    # We need to figure out excludes, in doing so there are 2 cases we should not exclude:
    # 1.) targets depend on A only should lead to an exclude of B
    # A/BUILD
    # A/B/BUILD
    #
    # 2.) targets depend on A and C should not lead to an exclude of B (would wipe out C)
    # A/BUILD
    # A/B
    # A/B/C/BUILD
    #
    # 1 approach: build set of all paths and parent paths containing BUILDs our targets depend on -
    # these are unexcludable

    unexcludable_paths = set()
    for source_set in self.sources:
      parent = os.path.join(self.root_dir, source_set.source_base, source_set.path)
      while True:
        unexcludable_paths.add(parent)
        parent, dir = os.path.split(parent)
        # no need to add the repo root or above, all source paths and extra paths are children
        if parent == self.root_dir:
          break
#......... remainder of this example omitted .........
Author: BabyDuncan, Project: commons, Lines: 101, Source: ide_gen.py

Example 15: configure_jvm

  def configure_jvm(self, extra_source_paths, extra_test_paths):
    """
      Configures this project's source sets returning the full set of targets the project is
      comprised of.  The full set can be larger than the initial set of targets when any of the
      initial targets only has partial ownership of its source set's directories.
    """

    # TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
    # construction of source sets and excludes ... and add a test!

    analyzed = OrderedSet()
    targeted = set()
    targeted_tuples = {}

    def relative_sources(target):
      sources = target.payload.sources_relative_to_buildroot()
      return [os.path.relpath(source, target.target_base) for source in sources]

    def source_target(target):
      result = ((self.transitive or target in self.targets) and
              target.has_sources() and
              (not (self.skip_java and is_java(target)) and
               not (self.skip_scala and is_scala(target))))
      return result

    def configure_source_sets(relative_base, sources, is_test):
      absolute_base = os.path.join(self.root_dir, relative_base)
      paths = set([os.path.dirname(source) for source in sources])
      for path in paths:
        absolute_path = os.path.join(absolute_base, path)
        pieces = (relative_base, path)
        # Previously this if-statement was testing against absolute_path's presence in targeted.
        # This broke in the (very weird) edge-case where two different sources have the same
        # absolute path, but choose the split between relative_base and path differently. It's
        # really important that we distinguish between them still, because the package name changes.
        # TODO(Garrett Malmquist): Fix the underlying bugs in pants that make this necessary.
        if pieces not in targeted_tuples:
          targeted.add(absolute_path)
          targeted_tuples[pieces] = sources
          self.sources.append(SourceSet(self.root_dir, relative_base, path, is_test))

    def find_source_basedirs(target):
      dirs = set()
      if source_target(target):
        absolute_base = os.path.join(self.root_dir, target.target_base)
        dirs.update([os.path.join(absolute_base, os.path.dirname(source))
                      for source in relative_sources(target)])
      return dirs

    def configure_target(target):
      if target not in analyzed:
        analyzed.add(target)
        self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))

        if target.has_resources:
          resources_by_basedir = defaultdict(set)
          for resources in target.resources:
            resources_by_basedir[target.target_base].update(relative_sources(resources))
          for basedir, resources in resources_by_basedir.items():
            self.resource_extensions.update(Project.extract_resource_extensions(resources))
            configure_source_sets(basedir, resources, is_test=False)

        if target.has_sources():
          test = target.is_test
          self.has_tests = self.has_tests or test
          base = target.target_base
          configure_source_sets(base, relative_sources(target), is_test=test)

        # Other BUILD files may specify sources in the same directory as this target. Those BUILD
        # files might be in parent directories (globs('a/b/*.java')) or even child directories if
        # this target globs children as well.  Gather all these candidate BUILD files to test for
        # sources they own that live in the directories this target's sources live in.
        target_dirset = find_source_basedirs(target)
        if target.address.is_synthetic:
          return [] # Siblings don't make sense for synthetic addresses.
        candidates = self.target_util.get_all_addresses(target.address.build_file)
        for ancestor in target.address.build_file.ancestors():
          candidates.update(self.target_util.get_all_addresses(ancestor))
        for sibling in target.address.build_file.siblings():
          candidates.update(self.target_util.get_all_addresses(sibling))
        for descendant in target.address.build_file.descendants():
          candidates.update(self.target_util.get_all_addresses(descendant))
        def is_sibling(target):
          return source_target(target) and target_dirset.intersection(find_source_basedirs(target))

        return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])

    for target in self.targets:
      target.walk(configure_target, predicate=source_target)

    def full_path(source_set):
      return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)

    # Check if there are any overlapping source_sets, and output an error message if so.
    # Overlapping source_sets cause serious problems with package name inference.
    overlap_error = ('SourceSets {current} and {previous} evaluate to the same full path.'
                     ' This can be caused by multiple BUILD targets claiming the same source,'
                     ' e.g., if a BUILD target in a parent directory contains an rglobs() while'
                     ' a BUILD target in a subdirectory of that uses a globs() which claims the'
                     ' same sources. This may cause package names to be inferred incorrectly (e.g.,'
#......... remainder of this example omitted .........
Author: cheecheeo, Project: pants, Lines: 101, Source: ide_gen.py


Note: The twitter.common.collections.orderedset.OrderedSet class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.