

Python Target.maybe_readable_identify Method Code Examples

This article collects typical code examples of the Python method twitter.pants.base.Target.maybe_readable_identify. If you are wondering what Target.maybe_readable_identify does or how to use it, the curated examples below may help. You can also explore further usage examples of twitter.pants.base.Target, the class this method belongs to.


Three code examples of Target.maybe_readable_identify are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
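
Before the examples, here is a minimal usage sketch distilled from how all three of them call the method: deriving a per-partition working directory name from a list of targets. The helper make_partition_dir is hypothetical, and the assumption that the method returns a readable single-target id when given one target and a hashed combined id otherwise is inferred from the method name, not confirmed by this page.

import os

from twitter.pants.base import Target

def make_partition_dir(tmpdir, targets):
  # Hypothetical helper: name the partition's scratch directory after the
  # targets it covers (assumed readable for one target, synthetic for many).
  partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(targets))
  os.mkdir(partition_tmpdir)
  return partition_tmpdir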

Example 1: execute

# Required import: from twitter.pants.base import Target [as alias]
# Or: from twitter.pants.base.Target import maybe_readable_identify [as alias]
  def execute(self, targets):
    scala_targets = [t for t in targets if has_sources(t, '.scala')]
    if not scala_targets:
      return

    # Get the exclusives group for the targets to compile.
    # The group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(scala_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    # Add (only to the local copy) classpath entries necessary for our compiler plugins.
    for conf in self._confs:
      for jar in self._zinc_utils.plugin_jars():
        cp.insert(0, (conf, jar))

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(scala_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._get_deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if ZincUtils.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            if self._zinc_utils.run_zinc_split(self._analysis_file,
                                               ((invalid_sources + deleted_sources, newly_invalid_analysis_tmp),
                                                ([], valid_analysis_tmp))):
              raise TaskError('Failed to split off invalid analysis.')
            if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
              if self._zinc_utils.run_zinc_merge([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                                 invalid_analysis_tmp):
                raise TaskError('Failed to merge prior and current invalid analysis.')
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            ZincUtils._move_analysis(valid_analysis_tmp, self._analysis_file)
            ZincUtils._move_analysis(invalid_analysis_tmp, self._invalid_analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            if self._zinc_utils.run_zinc_split(self._invalid_analysis_file, splits):
              raise TaskError('Failed to split invalid analysis into per-partition files.')

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          self._process_target_partition(partition, cp)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Kick off the background artifact cache write.
            if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
              self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

            # Merge the newly-valid analysis into our global valid analysis.
            if ZincUtils.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                new_valid_analysis = analysis_file + '.valid.new'
                if self._zinc_utils.run_zinc_merge([self._analysis_file, analysis_file], new_valid_analysis):
                  raise TaskError('Failed to merge new analysis back into valid analysis file.')
              ZincUtils._move_analysis(new_valid_analysis, self._analysis_file)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              ZincUtils._copy_analysis(analysis_file, self._analysis_file)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
#......... part of the code omitted here .........
Developer: davearata, Project: twitter-commons, Lines: 103, Source: scala_compile.py
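
A pattern worth noting in example 1: all analysis rewriting happens under a fresh tmpdir, and the real analysis files are only overwritten (via ZincUtils._move_analysis) once every split and merge has succeeded, so a failure partway through leaves the originals intact. A minimal, self-contained sketch of that write-to-scratch-then-move idea, with a hypothetical build_fn standing in for the zinc split/merge steps:

import os
import shutil
import tempfile
import uuid

def replace_after_success(build_fn, final_path):
  # Hypothetical helper: build the new state at a scratch path first; if
  # build_fn raises, final_path is never touched.
  tmpdir = tempfile.mkdtemp()
  tmp_path = os.path.join(tmpdir, str(uuid.uuid4()))
  build_fn(tmp_path)
  shutil.move(tmp_path, final_path)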

Example 2: execute

# Required import: from twitter.pants.base import Target [as alias]
# Or: from twitter.pants.base.Target import maybe_readable_identify [as alias]
  def execute(self, targets):
    # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so we
    # can inject a dep on the scala runtime library and still have it ivy-resolve.

    scala_targets = [t for t in targets if t.has_sources('.scala')]
    
    if not scala_targets:
      return

    # Get the exclusives group for the targets to compile.
    # The group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(scala_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    # Add (only to the local copy) classpath entries necessary for our compiler plugins.
    for conf in self._confs:
      for jar in self._zinc_utils.plugin_jars():
        cp.insert(0, (conf, jar))

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(scala_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._get_deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if ZincUtils.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            Analysis.split_to_paths(self._analysis_file,
                                    [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)], valid_analysis_tmp)
            if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
              Analysis.merge_from_paths([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                        invalid_analysis_tmp)
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            shutil.move(valid_analysis_tmp, self._analysis_file)
            shutil.move(invalid_analysis_tmp, self._invalid_analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            Analysis.split_to_paths(self._invalid_analysis_file, splits)

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          self._process_target_partition(partition, cp)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Merge the newly-valid analysis with our global valid analysis.
            new_valid_analysis = analysis_file + '.valid.new'
            if ZincUtils.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                Analysis.merge_from_paths([self._analysis_file, analysis_file], new_valid_analysis)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              shutil.copy(analysis_file, new_valid_analysis)

            # Move the merged valid analysis to its proper location.
            # We do this before checking for missing dependencies, so that we can still
            # enjoy an incremental compile after fixing missing deps.
            shutil.move(new_valid_analysis, self._analysis_file)

            # Check for missing dependencies.
            actual_deps = Analysis.parse_deps_from_path(self._analysis_file)
            # TODO(benjy): Temporary hack until we inject a dep on the scala runtime jar.
            actual_deps_filtered = {}
#......... part of the code omitted here .........
Developer: ssalevan, Project: commons, Lines: 103, Source: scala_compile.py
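
Example 2 swaps the return-code-checking ZincUtils calls of example 1 for Analysis.split_to_paths / Analysis.merge_from_paths, which (judging from the absence of TaskError checks) raise on failure. The source-flattening idiom it shares with the other examples is easy to try standalone; a self-contained sketch with illustrative data:

import itertools

# Illustrative data only: invalid sources keyed by target, with targets
# shown as plain strings instead of Target objects.
invalid_sources_by_target = {
  'src/scala/com/foo:foo': ['com/foo/Foo.scala', 'com/foo/Util.scala'],
  'src/scala/com/bar:bar': ['com/bar/Bar.scala'],
}

# Flatten the per-target lists into one list, as all three examples do
# before splitting the analysis files. (Dict iteration order may vary.)
invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
print(invalid_sources)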

Example 3: execute

# Required import: from twitter.pants.base import Target [as alias]
# Or: from twitter.pants.base.Target import maybe_readable_identify [as alias]
  def execute(self, targets):
    # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so e.g.,
    # we can inject a dep on the scala runtime library and still have it ivy-resolve.

    relevant_targets = [t for t in targets if t.has_sources(self._file_suffix)]

    if not relevant_targets:
      return

    # Get the exclusives group for the targets to compile.
    # The group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(relevant_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    classpath = egroups.get_classpath_for_group(group_id)

    # Add any extra classpath elements.
    for conf in self._confs:
      for jar in self.extra_classpath_elements():
        classpath.insert(0, (conf, jar))

    # Target -> sources (relative to buildroot).
    sources_by_target = self._compute_sources_by_target(relevant_targets)

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(relevant_targets,
                          invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        invalid_sources_by_target = {}
        for tgt in invalid_targets:
          invalid_sources_by_target[tgt] = sources_by_target[tgt]
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            self._analysis_tools.split_to_paths(self._analysis_file,
              [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)], valid_analysis_tmp)
            if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
              self._analysis_tools.merge_from_paths(
                [self._invalid_analysis_file, newly_invalid_analysis_tmp], invalid_analysis_tmp)
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            shutil.move(valid_analysis_tmp, self._analysis_file)
            shutil.move(invalid_analysis_tmp, self._invalid_analysis_file)

        # Register products for all the valid targets.
        # We register as we go, so dependency checking code can use this data.
        valid_targets = list(set(relevant_targets) - set(invalid_targets))
        self._register_products(valid_targets, sources_by_target, self._analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          cp_entries = [entry for conf, entry in classpath if conf in self._confs]
          self._process_target_partition(partition, cp_entries)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Merge the newly-valid analysis with our global valid analysis.
            new_valid_analysis = analysis_file + '.valid.new'
            if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                self._analysis_tools.merge_from_paths([self._analysis_file, analysis_file], new_valid_analysis)
            else:  # We need to keep analysis_file around. Background tasks may need it.
#......... part of the code omitted here .........
Developer: alfss, Project: commons, Lines: 103, Source: jvm_compile.py
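
Example 3 is the generalized form of the first two: instead of hard-coding '.scala', it filters targets by self._file_suffix, so the same execute() can serve any JVM language. A sketch of how subclasses might pin that suffix follows; the class and attribute layout is an assumption for illustration, not taken from pants itself.

class JvmCompile(object):
  # Hypothetical base class carrying the generic execute() from example 3.
  _file_suffix = None

class ScalaCompile(JvmCompile):
  _file_suffix = '.scala'

class JavaCompile(JvmCompile):
  _file_suffix = '.java'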


Note: The twitter.pants.base.Target.maybe_readable_identify examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain copyright; consult each project's License before distributing or using the code, and do not reproduce this page without permission.