Python Dependencies.findclasses Method Code Examples

This article collects typical usage examples of the Python method twitter.pants.tasks.jvm_compiler_dependencies.Dependencies.findclasses. If you are wondering what Dependencies.findclasses does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, twitter.pants.tasks.jvm_compiler_dependencies.Dependencies.


The following presents 12 code examples of the Dependencies.findclasses method, sorted by popularity by default.
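Before the numbered examples, here is a minimal usage sketch, not drawn from any one project below, of the call pattern they all share: construct a Dependencies over a classes output directory, load a depfile, and walk the nested target -> source -> classes mapping that findclasses returns. The paths and the targets list are hypothetical placeholders.

import os

from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies

# Hypothetical paths; a real pants task derives these from its configured workdir.
classes_dir = 'pants.d/java-compile/classes'
depfile = 'pants.d/java-compile/depfiles/example.dependencies'
targets = []  # would hold resolved pants Target objects in a real task

deps = Dependencies(classes_dir)
deps.load(depfile)  # read the source -> classes records written by a prior compile

# findclasses(targets) returns {target: {source: [class files relative to classes_dir]}}.
for target, classes_by_source in deps.findclasses(targets).items():
  for source, classes in classes_by_source.items():
    for cls in classes:
      print os.path.join(classes_dir, cls)  # full path to each generated .class file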

Example 1: execute_single_compilation

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def execute_single_compilation(self, vt, cp):
    depfile = self.create_depfile_path(vt.targets)

    self.merge_depfile(vt)  # Get what we can from previous builds.
    sources_by_target, fingerprint = self.calculate_sources(vt.targets)
    if sources_by_target:
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                              '\n  '.join(str(t) for t in sources_by_target.keys()))
      else:
        classpath = [jar for conf, jar in cp if conf in self._confs]
        result = self.compile(classpath, sources, fingerprint, depfile)
        if result != 0:
          default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
          raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
        self.split_depfile(vt)

      all_artifact_files = [depfile]

      if self._artifact_cache and self.context.options.write_to_artifact_cache:
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        vts_artifactfile_pairs = []
        for single_vt in vt.versioned_targets:
          per_target_depfile = self.create_depfile_path([single_vt.target])
          per_target_artifact_files = [per_target_depfile]
          for _, classes_by_source in deps.findclasses([single_vt.target]).items():
            for _, classes in classes_by_source.items():
              classfile_paths = [os.path.join(self._classes_dir, cls) for cls in classes]
              per_target_artifact_files.extend(classfile_paths)
              all_artifact_files.extend(classfile_paths)
          vts_artifactfile_pairs.append((single_vt, per_target_artifact_files))
        vts_artifactfile_pairs.append((vt, all_artifact_files))
        self.update_artifact_cache(vts_artifactfile_pairs)
Author: wickman, Project: commons, Lines: 37, Source file: java_compile.py

Example 2: _post_process

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def _post_process(self, target, cp):
    """Must be called on all targets, whether they needed compilation or not."""
    classes_dir, depfile, _ = self._output_paths([target])

    # Update the classpath, for the benefit of tasks downstream from us.
    if os.path.exists(classes_dir):
      for conf in self._confs:
        cp.insert(0, (conf, classes_dir))

    # Make note of the classes generated by this target.
    if os.path.exists(depfile) and self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(classes_dir)
      deps.load(depfile)
      genmap = self.context.products.get('classes')
      for classes_by_source in deps.findclasses([target]).values():
        for source, classes in classes_by_source.items():
          genmap.add(source, classes_dir, classes)
          genmap.add(target, classes_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way
      # Create and Map scala plugin info files to the owning targets.
      if is_scalac_plugin(target) and target.classname:
        basedir, plugin_info_file = self._zinc_utils.write_plugin_info(self._resources_dir, target)
        genmap.add(target, basedir, [plugin_info_file])
Author: bag-of-projects, Project: commons, Lines: 27, Source file: scala_compile.py

Example 3: _merge_artifact

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def _merge_artifact(self, versioned_target_set):
    """Merges artifacts representing the individual targets in a VersionedTargetSet into one artifact for that set.
    Creates an output classes dir, depfile and analysis file for the VersionedTargetSet.
    Note that the merged artifact may be incomplete (e.g., if we have no previous artifacts for some of the
    individual targets). That's OK: We run this right before we invoke zinc, which will fill in what's missing.
    This method is not required for correctness, only for efficiency: it can prevent zinc from doing superfluous work.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
      return  # Nothing to do.

    with temporary_dir() as tmpdir:
      dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths(versioned_target_set.targets)
      safe_rmtree(dst_classes_dir)
      safe_mkdir(dst_classes_dir)
      src_analysis_files = []

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_classes_dir)

      for target in versioned_target_set.targets:
        src_classes_dir, src_depfile, src_analysis_file = self._output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_classes_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_classes_dir, cls)
              dst = os.path.join(dst_classes_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Rebase a copy of the per-target analysis files to reflect the merged classes dir.
          if os.path.exists(src_analysis_file):
            src_analysis_file_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_file, self._analysis_files_base))
            shutil.copyfile(src_analysis_file, src_analysis_file_tmp)
            src_analysis_files.append(src_analysis_file_tmp)
            if self._zinc_utils.run_zinc_rebase(src_analysis_file_tmp, [(src_classes_dir, dst_classes_dir)]):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '\
                                    'Target may require a full rebuild.' %\
                                    src_analysis_file_tmp)

      dst_deps.save(dst_depfile)

      if self._zinc_utils.run_zinc_merge(src_analysis_files, dst_analysis_file):
        self.context.log.warn('zinc failed to merge analysis files %s to %s. '\
                              'Target may require a full rebuild.' %\
                             (':'.join(src_analysis_files), dst_analysis_file))
Author: bag-of-projects, Project: commons, Lines: 61, Source file: scala_compile.py

Example 4: merge_artifact

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def merge_artifact(self, versioned_target_set):
    if len(versioned_target_set.targets) <= 1:
      return

    with temporary_dir() as tmpdir:
      dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
      safe_rmtree(dst_output_dir)
      safe_mkdir(dst_output_dir)
      src_analysis_caches = []

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_output_dir)

      for target in versioned_target_set.targets:
        src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_output_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_output_dir, cls)
              dst = os.path.join(dst_output_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Rebase a copy of the per-target analysis files prior to merging.
          if os.path.exists(src_analysis_cache):
            src_analysis_cache_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
            shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
            src_analysis_caches.append(src_analysis_cache_tmp)
            if self._zinc_utils.run_zinc_rebase(cache=src_analysis_cache_tmp, rebasings=[(src_output_dir, dst_output_dir)]):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                                    'Target may require a full rebuild.' % \
                                    src_analysis_cache_tmp)

      dst_deps.save(dst_depfile)

      if self._zinc_utils.run_zinc_merge(src_caches=src_analysis_caches, dst_cache=dst_analysis_cache):
        self.context.log.warn('zinc failed to merge analysis files %s to %s. ' \
                              'Target may require a full rebuild.' % \
                              (':'.join(src_analysis_caches), dst_analysis_cache))
Author: forestlzj, Project: commons, Lines: 53, Source file: scala_compile.py

Example 5: split_depfile

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def split_depfile(self, vt):
    depfile = self.create_depfile_path(vt.targets)
    if len(vt.targets) <= 1 or not os.path.exists(depfile) or self.dry_run:
      return

    deps = Dependencies(self._classes_dir)
    deps.load(depfile)

    classes_by_source_by_target = deps.findclasses(vt.targets)
    for target in vt.targets:
      classes_by_source = classes_by_source_by_target.get(target, {})
      dst_depfile = self.create_depfile_path([target])
      dst_deps = Dependencies(self._classes_dir)
      for source, classes in classes_by_source.items():
        src = os.path.join(target.target_base, source)
        dst_deps.add(src, classes)
      dst_deps.save(dst_depfile)
Author: wickman, Project: commons, Lines: 19, Source file: java_compile.py
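
Example 5 also exercises the write side of the Dependencies API used throughout these examples: add() records the classes generated from a source file, and save() persists the mapping to a depfile. A small round-trip sketch follows; the paths and entry are hypothetical placeholders.

from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies

classes_dir = 'pants.d/classes'  # hypothetical classes dir
depfile = 'pants.d/depfiles/foo.dependencies'  # hypothetical depfile path

deps = Dependencies(classes_dir)
deps.add('src/java/com/example/Foo.java', ['com/example/Foo.class'])  # hypothetical entry
deps.save(depfile)

# A later run can reload the same records, as the examples above do.
reloaded = Dependencies(classes_dir)
reloaded.load(depfile)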

Example 6: post_process

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def post_process(self, vt, upstream_analysis_caches, split_artifact):
    output_dir, depfile, analysis_cache = self.create_output_paths(vt.targets)
    if not self.dry_run:
      # Read in the deps created either just now or by a previous compiler run on these targets.
      if os.path.exists(depfile):
        self.context.log.debug('Reading dependencies from ' + depfile)
        deps = Dependencies(output_dir)
        deps.load(depfile)

        if split_artifact:
          self.split_artifact(deps, vt)

        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          for target, classes_by_source in deps.findclasses(vt.targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, output_dir, classes)
              genmap.add(target, output_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # Create and Map scala plugin info files to the owning targets.
          for target in vt.targets:
            if is_scalac_plugin(target) and target.classname:
              basedir = self.write_plugin_info(target)
              genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    if os.path.exists(analysis_cache):
      analysis_cache_parts = os.path.split(analysis_cache)
      if not upstream_analysis_caches.has(output_dir):
        # A previous chunk might have already updated this. It is certainly possible for a later chunk to
        # independently depend on some target that a previous chunk already built.
        upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
Author: lxwuchang, Project: commons, Lines: 40, Source file: scala_compile.py

Example 7: JavaCompile

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
class JavaCompile(NailgunTask):
  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    NailgunTask.setup_parser(option_group, args, mkflag)

    option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
                            dest="java_compile_warnings", default=True,
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile java code with all configured warnings "
                                 "enabled.")

    option_group.add_option(mkflag("args"), dest="java_compile_args", action="append",
                            help="Pass these extra args to javac.")

    option_group.add_option(mkflag("partition-size-hint"), dest="java_compile_partition_size_hint",
                            action="store", type="int", default=-1,
                            help="Roughly how many source files to attempt to compile together. Set"
                                 " to a large number to compile all sources together. Set this to 0"
                                 " to compile target-by-target. Default is set in pants.ini.")

  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))

    if context.options.java_compile_partition_size_hint != -1:
      self._partition_size_hint = context.options.java_compile_partition_size_hint
    else:
      self._partition_size_hint = context.config.getint('java-compile', 'partition_size_hint',
                                                        default=1000)

    workdir = context.config.get('java-compile', 'workdir')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._depfile_dir = os.path.join(workdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)

    self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
    self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

    self._opts = context.config.getlist('java-compile', 'args')
    self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

    self._javac_opts = []
    if context.options.java_compile_args:
      for arg in context.options.java_compile_args:
        self._javac_opts.extend(shlex.split(arg))
    else:
      self._javac_opts.extend(context.config.getlist('java-compile', 'javac_args', default=[]))

    if context.options.java_compile_warnings:
      self._opts.extend(context.config.getlist('java-compile', 'warning_args'))
    else:
      self._opts.extend(context.config.getlist('java-compile', 'no_warning_args'))

    self._confs = context.config.getlist('java-compile', 'confs')

    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)

  def product_type(self):
    return 'classes'

  def can_dry_run(self):
    return True

  def execute(self, targets):
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          cp.insert(0, (conf, self._classes_dir))

      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)
# ... (remaining code omitted) ...
Author: wickman, Project: commons, Lines: 103, Source file: java_compile.py

Example 8: merge_artifact

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def merge_artifact(self, versioned_target_set):
    if len(versioned_target_set.targets) <= 1:
      return

    with temporary_dir() as tmpdir:
      dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
      safe_rmtree(dst_output_dir)
      safe_mkdir(dst_output_dir)
      src_analysis_caches = []

      analysis_args = []
      analysis_args.extend(self._zinc_jar_args)
      analysis_args.extend([
        '-log-level', self.context.options.log_level or 'info',
        '-analysis',
        ])

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_output_dir)

      for target in versioned_target_set.targets:
        src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_output_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_output_dir, cls)
              dst = os.path.join(dst_output_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Use zinc to rebase a copy of the per-target analysis files prior to merging.
          if os.path.exists(src_analysis_cache):
            src_analysis_cache_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
            shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
            src_analysis_caches.append(src_analysis_cache_tmp)
            rebase_args = analysis_args + [
              '-cache', src_analysis_cache_tmp,
              '-rebase', '%s:%s' % (src_output_dir, dst_output_dir),
              ]
            if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
              'Target may require a full rebuild.' % src_analysis_cache_tmp)

      dst_deps.save(dst_depfile)

      # Use zinc to merge the analysis files.
      merge_args = analysis_args + [
        '-cache', dst_analysis_cache,
        '-merge', ':'.join(src_analysis_caches),
      ]
      if self.runjava(self._main, classpath=self._zinc_classpath, args=merge_args, jvmargs=self._jvm_args):
        raise TaskError('zinc failed to merge analysis files %s to %s' %
                        (':'.join(src_analysis_caches), dst_analysis_cache))
Author: lxwuchang, Project: commons, Lines: 67, Source file: scala_compile.py

Example 9: execute_single_compilation

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
  def execute_single_compilation(self, versioned_target_set, cp, upstream_analysis_caches):
    """Execute a single compilation, updating upstream_analysis_caches if needed."""
    if self._flatten:
      compilation_id = 'flat'
      output_dir = self._flat_classes_dir
    else:
      compilation_id = Target.maybe_readable_identify(versioned_target_set.targets)
      # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
      # analysis caches of previous compilations. We then copy the results out to the real output dir.
      output_dir = os.path.join(self._incremental_classes_dir, compilation_id)

    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'

    safe_mkdir(output_dir)

    if not versioned_target_set.valid:
      with self.check_artifact_cache(versioned_target_set,
                                     build_artifacts=[output_dir, depfile, analysis_cache],
                                     artifact_root=self._workdir) as needs_building:
        if needs_building:
          self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
          sources_by_target = self.calculate_sources(versioned_target_set.targets)
          if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
              touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
              self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                                    '\n  '.join(str(t) for t in sources_by_target.keys()))
            else:
              classpath = [jar for conf, jar in cp if conf in self._confs]
              result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
              if result != 0:
                raise TaskError('%s returned %d' % (self._main, result))

    # Note that the following post-processing steps must happen even for valid targets.

    # Read in the deps created either just now or by a previous compiler run on these targets.
    if self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(output_dir)
      deps.load(depfile)

      genmap = self.context.products.get('classes')

      for target, classes_by_source in deps.findclasses(versioned_target_set.targets).items():
        for source, classes in classes_by_source.items():
          genmap.add(source, output_dir, classes)
          genmap.add(target, output_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way
      # Create and Map scala plugin info files to the owning targets.
      for target in versioned_target_set.targets:
        if is_scalac_plugin(target) and target.classname:
          basedir = self.write_plugin_info(target)
          genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    analysis_cache_parts = os.path.split(analysis_cache)
    if not upstream_analysis_caches.has(output_dir):
      # A previous chunk might have already updated this. It is certainly possible for a later chunk to
      # independently depend on some target that a previous chunk already built.
      upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
Author: kevints, Project: commons, Lines: 70, Source file: scala_compile.py

Example 10: JavaCompile

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
class JavaCompile(NailgunTask):
  @staticmethod
  def _has_java_sources(target):
    return is_apt(target) or isinstance(target, JavaLibrary) or isinstance(target, JavaTests)

  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    NailgunTask.setup_parser(option_group, args, mkflag)

    option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
                            dest="java_compile_warnings", default=True,
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile java code with all configured warnings "
                                 "enabled.")

    option_group.add_option(mkflag("partition-size-hint"), dest="java_compile_partition_size_hint",
      action="store", type="int", default=-1,
      help="Roughly how many source files to attempt to compile together. Set to a large number to compile "\
           "all sources together. Set this to 0 to compile target-by-target. Default is set in pants.ini.")

  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))

    self._partition_size_hint = \
      context.options.java_compile_partition_size_hint \
      if context.options.java_compile_partition_size_hint != -1 \
      else context.config.getint('java-compile', 'partition_size_hint')

    workdir = context.config.get('java-compile', 'workdir')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._depfile_dir = os.path.join(workdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)

    self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
    self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

    self._args = context.config.getlist('java-compile', 'args')
    self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

    if context.options.java_compile_warnings:
      self._args.extend(context.config.getlist('java-compile', 'warning_args'))
    else:
      self._args.extend(context.config.getlist('java-compile', 'no_warning_args'))

    self._confs = context.config.getlist('java-compile', 'confs')

    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)

  def product_type(self):
    return 'classes'

  def can_dry_run(self):
    return True

  def execute(self, targets):
    java_targets = filter(JavaCompile._has_java_sources, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          cp.insert(0, (conf, self._classes_dir))

      with self.invalidated(java_targets, invalidate_dependents=True,
          partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.all_vts:
          if vt.valid:  # Don't compile, just post-process.
            self.post_process(vt)
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp)
          if not self.dry_run:
            vt.update()

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')

          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, target.id)
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

  def execute_single_compilation(self, vt, cp):
    depfile = self.create_depfile_path(vt.targets)
# ... (remaining code omitted) ...
Author: samitny, Project: commons, Lines: 103, Source file: java_compile.py

Example 11: ScalaCompile

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
class ScalaCompile(NailgunTask):
  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    NailgunTask.setup_parser(option_group, args, mkflag)

    option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
                            dest="scala_compile_warnings", default=True,
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile scala code with all configured warnings "
                                 "enabled.")

    option_group.add_option(mkflag("flatten"), mkflag("flatten", negate=True),
                            dest="scala_compile_flatten",
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile scala code for all dependencies in a "
                                 "single compilation.")

  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('scala-compile', 'nailgun_dir'))

    self._flatten = \
      context.options.scala_compile_flatten if context.options.scala_compile_flatten is not None else \
      context.config.getbool('scala-compile', 'default_to_flatten')

    self._compile_profile = context.config.get('scala-compile', 'compile-profile')  # The target scala version.
    self._zinc_profile = context.config.get('scala-compile', 'zinc-profile')
    self._depemitter_profile = context.config.get('scala-compile', 'dependencies-plugin-profile')

    # All scala targets implicitly depend on the selected scala runtime.
    scaladeps = []
    for spec in context.config.getlist('scala-compile', 'scaladeps'):
      scaladeps.extend(context.resolve(spec))
    for target in context.targets(is_scala):
      target.update_dependencies(scaladeps)

    workdir = context.config.get('scala-compile', 'workdir')
    self._incremental_classes_dir = os.path.join(workdir, 'incremental.classes')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._analysis_cache_dir = os.path.join(workdir, 'analysis_cache')
    self._resources_dir = os.path.join(workdir, 'resources')

    self._main = context.config.get('scala-compile', 'main')

    self._args = context.config.getlist('scala-compile', 'args')
    self._jvm_args = context.config.getlist('scala-compile', 'jvm_args')
    if context.options.scala_compile_warnings:
      self._args.extend(context.config.getlist('scala-compile', 'warning_args'))
    else:
      self._args.extend(context.config.getlist('scala-compile', 'no_warning_args'))

    self._confs = context.config.getlist('scala-compile', 'confs')
    self._depfile_dir = os.path.join(workdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)

  def invalidate_for(self):
    return [self._flatten]

  def execute(self, targets):
    scala_targets = filter(is_scala, reversed(InternalTarget.sort_targets(targets)))
    if scala_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          # If we're not flattening, we don't want the classes dir on the classpath yet, as we want zinc to
          # see only the per-compilation output dirs, so it can map them to analysis caches.
          if self._flatten:
            cp.insert(0, (conf, self._classes_dir))

      if not self._flatten:
        upstream_analysis_caches = OrderedDict()  # output dir -> analysis cache file for the classes in that dir.
        for target in scala_targets:
          self.execute_single_compilation([target], cp, upstream_analysis_caches)
      else:
        self.execute_single_compilation(scala_targets, cp, {})

      if not self._flatten:
        # Now we can add the global output dir, so that subsequent goals can see it.
        with self.context.state('classpath', []) as cp:
          for conf in self._confs:
            cp.insert(0, (conf, self._classes_dir))

      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')

        # Map generated classes to the owning targets and sources.
        for target, classes_by_source in self._deps.findclasses(scala_targets).items():
          for source, classes in classes_by_source.items():
            genmap.add(source, self._classes_dir, classes)
            genmap.add(target, self._classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way
        # Create and Map scala plugin info files to the owning targets.
        for target in scala_targets:
          if is_scalac_plugin(target) and target.classname:
            basedir = self.write_plugin_info(target)
            genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

# ... (remaining code omitted) ...
Author: shansun123, Project: commons, Lines: 103, Source file: scala_compile.py

Example 12: JavaCompile

# Required import: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as alias]
# Or: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import findclasses [as alias]
class JavaCompile(NailgunTask):
  @staticmethod
  def _is_java(target):
    return is_apt(target) or isinstance(target, JavaLibrary) or isinstance(target, JavaTests)

  @classmethod
  def setup_parser(cls, option_group, args, mkflag):
    NailgunTask.setup_parser(option_group, args, mkflag)

    option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
                            dest="java_compile_warnings", default=True,
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile java code with all configured warnings "
                                 "enabled.")

    option_group.add_option(mkflag("flatten"), mkflag("flatten", negate=True),
                            dest="java_compile_flatten",
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile java code for all dependencies in a "
                                 "single compilation.")

  def __init__(self, context):
    NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))

    self._flatten = \
      context.options.java_compile_flatten if context.options.java_compile_flatten is not None else \
      context.config.getbool('java-compile', 'default_to_flatten')

    workdir = context.config.get('java-compile', 'workdir')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._depfile_dir = os.path.join(workdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)

    self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
    self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

    self._args = context.config.getlist('java-compile', 'args')
    self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

    if context.options.java_compile_warnings:
      self._args.extend(context.config.getlist('java-compile', 'warning_args'))
    else:
      self._args.extend(context.config.getlist('java-compile', 'no_warning_args'))

    self._confs = context.config.getlist('java-compile', 'confs')

  def execute(self, targets):
    java_targets = filter(JavaCompile._is_java, reversed(InternalTarget.sort_targets(targets)))
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          cp.insert(0, (conf, self._classes_dir))

      if not self._flatten:
        for target in java_targets:
          self.execute_single_compilation([target], cp)
      else:
        self.execute_single_compilation(java_targets, cp)

      if self.context.products.isrequired('classes'):
        genmap = self.context.products.get('classes')

        # Map generated classes to the owning targets and sources.
        for target, classes_by_source in self._deps.findclasses(java_targets).items():
          for source, classes in classes_by_source.items():
            genmap.add(source, self._classes_dir, classes)
            genmap.add(target, self._classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way
        # 'Map' (rewrite) annotation processor service info files to the owning targets.
        for target in java_targets:
          if is_apt(target) and target.processors:
            basedir = os.path.join(self._resources_dir, target.id)
            processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
            self.write_processor_info(processor_info_file, target.processors)
            genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

  def execute_single_compilation(self, java_targets, cp):
    self.context.log.info('Compiling targets %s' % str(java_targets))

    # Compute the id of this compilation. We try to make it human-readable.
    if len(java_targets) == 1:
      compilation_id = java_targets[0].id
    else:
      compilation_id = self.context.identify(java_targets)

    if self._flatten:
      # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
      # allows us to build different targets in different invocations without losing dependency information
      # from any of them.
      depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
      # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
      # compilation will read in the entire depfile, add its stuff to it and write it out again).
      depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
# ... (remaining code omitted) ...
Author: rodzyn0688, Project: commons, Lines: 103, Source file: java_compile.py


Note: The twitter.pants.tasks.jvm_compiler_dependencies.Dependencies.findclasses examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. For distribution and use, refer to the corresponding project's License. Do not reproduce without permission.