

Python Target.maybe_readable_identify Method Code Examples

This article collects typical usage examples of the Python method twitter.pants.base.target.Target.maybe_readable_identify. If you have been wondering what Target.maybe_readable_identify does, or how and where to use it, the curated examples below may help. You can also explore further usage examples of its containing class, twitter.pants.base.target.Target.


The following presents 12 code examples of the Target.maybe_readable_identify method, ordered by popularity by default.
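
Before diving into the examples, here is a hedged sketch of the method's typical role, inferred from how the examples below use it (the real implementation lives in twitter.pants.base.target and may differ): given a list of targets, it returns a stable, filesystem-safe identifier for the set -- human-readable when possible, a combined hash otherwise -- which compile tasks then use to name per-target-set output directories and files.

import os

from twitter.pants.base.target import Target


def output_paths_for(targets, base_dir):
  # Hypothetical helper, mirroring the pattern in Examples 2, 4, 5 and 9:
  # the identifier is stable across runs for the same target set, so
  # outputs keyed on it can be found and reused by later invocations.
  artifact_id = Target.maybe_readable_identify(targets)
  classes_dir = os.path.join(base_dir, 'classes', artifact_id)
  depfile = os.path.join(base_dir, 'depfiles', artifact_id + '.dependencies')
  return classes_dir, depfile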

Example 1: execute

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def execute(self, targets):
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      egroups = self.context.products.get_data('exclusives_groups')
      group_id = egroups.get_group_key_for_target(java_targets[0])
      for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])


      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          exclusives_classpath = egroups.get_classpath_for_group(group_id)
          self.execute_single_compilation(vt, exclusives_classpath)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Developer: BabyDuncan, Project: commons, Lines: 62, Source: java_compile.py
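
Example 1 reads the annotation-processor service info file back as one processor class name per line (the loop over f above). A matching write_processor_info on the same task class might therefore look like the minimal sketch below; the on-disk format is an assumption inferred from that read side, not confirmed against the pants source.

  def write_processor_info(self, processor_info_file, processors):
    # Assumed format, inferred from the read side in Example 1: one
    # annotation-processor class name per line, so later compilation rounds
    # can re-read and merge the entries.
    with safe_open(processor_info_file, 'w') as f:
      for processor in sorted(processors):
        f.write('%s\n' % processor.strip())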

Example 2: _artifact_args

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def _artifact_args(self, targets):
    """Returns the artifact paths for the given target set."""
    artifact_id = Target.maybe_readable_identify(targets)
    # Each compilation must output to its own directory, so zinc can then associate those with the
    # appropriate analysis files of previous compilations.
    classes_dir = os.path.join(self._classes_dirs_base, artifact_id)
    analysis_file = os.path.join(self._analysis_files_base, artifact_id) + '.analysis'
    return artifact_id, classes_dir, analysis_file
Developer: ewhauser, Project: commons, Lines: 10, Source: zinc_artifact.py

Example 3: extra_products

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def extra_products(self, target):
    ret = []
    if target.is_apt and target.processors:
      root = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
      processor_info_file = os.path.join(root, JavaCompile._PROCESSOR_INFO_FILE)
      self._write_processor_info(processor_info_file, target.processors)
      ret.append((root, [processor_info_file]))
    return ret
Developer: alfss, Project: commons, Lines: 10, Source: java_compile.py

Example 4: create_output_paths

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def create_output_paths(self, targets):
    compilation_id = Target.maybe_readable_identify(targets)
    # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
    # analysis caches of previous compilations.
    output_dir = os.path.join(self._classes_dir, compilation_id)

    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'
    return output_dir, depfile, analysis_cache
Developer: lxwuchang, Project: commons, Lines: 11, Source: scala_compile.py

Example 5: _output_paths

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def _output_paths(self, targets):
    """Returns the full paths to the classes dir, depfile and analysis file for the given target set."""
    compilation_id = Target.maybe_readable_identify(targets)
    # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
    # analysis files of previous compilations.
    classes_dir = os.path.join(self._classes_dir_base, compilation_id)

    depfile = os.path.join(self._depfiles_base, compilation_id) + '.dependencies'
    analysis_file = os.path.join(self._analysis_files_base, compilation_id) + '.analysis'
    return classes_dir, depfile, analysis_file
Developer: bag-of-projects, Project: commons, Lines: 12, Source: scala_compile.py

Example 6: extra_products

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def extra_products(self, target):
    ret = []
    # TODO(John Sirois): Map target.resources in the same way.
    # 'Map' (rewrite) annotation processor service info files to the owning targets.
    if target.is_apt and target.processors:
      basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
      processor_info_file = os.path.join(basedir, JavaCompile._PROCESSOR_INFO_FILE)
      self._write_processor_info(processor_info_file, target.processors)
      ret.append((basedir, [processor_info_file]))
    return ret
Developer: theyelllowdart, Project: commons, Lines: 12, Source: java_compile.py

Example 7: _add_all_products_to_genmap

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def _add_all_products_to_genmap(self, sources_by_target, classes_by_source):
    # Map generated classes to the owning targets and sources.
    genmap = self.context.products.get('classes')
    for target, sources in sources_by_target.items():
      for source in sources:
        classes = classes_by_source.get(source, [])
        relsrc = os.path.relpath(source, target.target_base)
        genmap.add(relsrc, self._classes_dir, classes)
        genmap.add(target, self._classes_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way
      # 'Map' (rewrite) annotation processor service info files to the owning targets.
      if is_apt(target) and target.processors:
        basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
        processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
        self.write_processor_info(processor_info_file, target.processors)
        genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
Developer: bollwang, Project: commons, Lines: 19, Source: java_compile.py

Example 8: execute_single_compilation

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def execute_single_compilation(self, versioned_targets, cp):
    compilation_id = Target.maybe_readable_identify(versioned_targets.targets)

    # TODO: Use the artifact cache. In flat mode we may want to look for the artifact for all targets,
    # not just the invalid ones, as it might be more likely to be present. Or we could look for both.

    if self._flatten:
      # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
      # allows us to build different targets in different invocations without losing dependency information
      # from any of them.
      depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
      # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
      # compilation will read in the entire depfile, add its stuff to it and write it out again).
      depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    if not versioned_targets.valid:
      self.context.log.info('Compiling targets %s' % str(versioned_targets.targets))
      sources_by_target, processors, fingerprint = self.calculate_sources(versioned_targets.targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
          self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, fingerprint, depfile)
          if result != 0:
            default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
            raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

        if processors:
          # Produce a monolithic apt processor service info file for further compilation rounds
          # and the unit test classpath.
          processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
          if os.path.exists(processor_info_file):
            with safe_open(processor_info_file, 'r') as f:
              for processor in f:
                processors.add(processor.strip())
          self.write_processor_info(processor_info_file, processors)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    self._deps.merge(deps)
Developer: kevints, Project: commons, Lines: 48, Source: java_compile.py

Example 9: create_depfile_path

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def create_depfile_path(self, targets):
    compilation_id = Target.maybe_readable_identify(targets)
    return os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
Developer: wickman, Project: commons, Lines: 5, Source: java_compile.py

Example 10: execute_single_compilation

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def execute_single_compilation(self, versioned_target_set, cp, upstream_analysis_caches):
    """Execute a single compilation, updating upstream_analysis_caches if needed."""
    if self._flatten:
      compilation_id = 'flat'
      output_dir = self._flat_classes_dir
    else:
      compilation_id = Target.maybe_readable_identify(versioned_target_set.targets)
      # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
      # analysis caches of previous compilations. We then copy the results out to the real output dir.
      output_dir = os.path.join(self._incremental_classes_dir, compilation_id)

    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'

    safe_mkdir(output_dir)

    if not versioned_target_set.valid:
      with self.check_artifact_cache(versioned_target_set,
                                     build_artifacts=[output_dir, depfile, analysis_cache],
                                     artifact_root=self._workdir) as needs_building:
        if needs_building:
          self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
          sources_by_target = self.calculate_sources(versioned_target_set.targets)
          if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
              touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
              self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                                    '\n  '.join(str(t) for t in sources_by_target.keys()))
            else:
              classpath = [jar for conf, jar in cp if conf in self._confs]
              result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
              if result != 0:
                raise TaskError('%s returned %d' % (self._main, result))

    # Note that the following post-processing steps must happen even for valid targets.

    # Read in the deps created either just now or by a previous compiler run on these targets.
    if self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(output_dir)
      deps.load(depfile)

      genmap = self.context.products.get('classes')

      for target, classes_by_source in deps.findclasses(versioned_target_set.targets).items():
        for source, classes in classes_by_source.items():
          genmap.add(source, output_dir, classes)
          genmap.add(target, output_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way
      # Create and Map scala plugin info files to the owning targets.
      for target in versioned_target_set.targets:
        if is_scalac_plugin(target) and target.classname:
          basedir = self.write_plugin_info(target)
          genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    analysis_cache_parts = os.path.split(analysis_cache)
    if not upstream_analysis_caches.has(output_dir):
      # A previous chunk might have already updated this. It is certainly possible for a later chunk to
      # independently depend on some target that a previous chunk already built.
      upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
Developer: kevints, Project: commons, Lines: 70, Source: scala_compile.py

Example 11: execute

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def execute(self, targets):
    # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so e.g.,
    # we can inject a dep on the scala runtime library and still have it ivy-resolve.

    # Create empty products up front, in case we have no relevant targets and return early.
    self._create_empty_products()

    relevant_targets = [t for t in targets if t.has_sources(self._file_suffix)]

    if not relevant_targets:
      return

    # Get the exclusives group for the targets to compile.
    # Grouping guarantees that there will be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(relevant_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    classpath = egroups.get_classpath_for_group(group_id)

    # Add any extra classpath elements.
    for conf in self._confs:
      for jar in self.extra_classpath_elements():
        classpath.insert(0, (conf, jar))

    # Target -> sources (relative to buildroot).
    sources_by_target = self._compute_sources_by_target(relevant_targets)

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(relevant_targets,
                          invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        invalid_sources_by_target = {}
        for tgt in invalid_targets:
          invalid_sources_by_target[tgt] = sources_by_target[tgt]
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            self._analysis_tools.split_to_paths(self._analysis_file,
              [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)], valid_analysis_tmp)
            if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
              self._analysis_tools.merge_from_paths(
                [self._invalid_analysis_file, newly_invalid_analysis_tmp], invalid_analysis_tmp)
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            self.move(valid_analysis_tmp, self._analysis_file)
            self.move(invalid_analysis_tmp, self._invalid_analysis_file)

        # Register products for all the valid targets.
        # We register as we go, so dependency checking code can use this data.
        valid_targets = list(set(relevant_targets) - set(invalid_targets))
        self._register_products(valid_targets, sources_by_target, self._analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources_by_target, analysis).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          cp_entries = [entry for conf, entry in classpath if conf in self._confs]
          self._process_target_partition(partition, cp_entries)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Merge the newly-valid analysis with our global valid analysis.
            new_valid_analysis = analysis_file + '.valid.new'
            if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
#.........some of this method's code is omitted here.........
Developer: CodeWarltz, Project: commons, Lines: 103, Source: jvm_compile.py

Example 12: identify

# Required module import: from twitter.pants.base.target import Target [as alias]
# Or: from twitter.pants.base.target.Target import maybe_readable_identify [as alias]
  def identify(self, targets):
    targets = list(targets)
    if len(targets) == 1 and hasattr(targets[0], 'provides') and targets[0].provides:
      return targets[0].provides.org, targets[0].provides.name
    else:
      return 'internal', Target.maybe_readable_identify(targets)
Developer: dynamicguy, Project: commons, Lines: 8, Source: ivy_utils.py
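
As a standalone distillation of Example 12 (the free function is hypothetical; only the branch logic comes from the example): a set containing a single target that declares provides keeps its published (org, name) coordinates, while any other target set is filed under an 'internal' org with a combined identifier from Target.maybe_readable_identify.

from twitter.pants.base.target import Target


def coordinates_for(targets):
  # Hypothetical free-function version of Example 12's identify().
  targets = list(targets)
  if len(targets) == 1 and getattr(targets[0], 'provides', None):
    return targets[0].provides.org, targets[0].provides.name
  return 'internal', Target.maybe_readable_identify(targets)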


Note: the twitter.pants.base.target.Target.maybe_readable_identify examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors; consult the corresponding project's License before distributing or using it. Do not reproduce without permission.