当前位置: 首页>>代码示例>>Python>>正文


Python jvm_compiler_dependencies.Dependencies类代码示例

本文整理汇总了Python中twitter.pants.tasks.jvm_compiler_dependencies.Dependencies的典型用法代码示例。如果您正苦于以下问题:Python Dependencies类的具体用法?Python Dependencies怎么用?Python Dependencies使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了Dependencies类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _write_to_artifact_cache

  def _write_to_artifact_cache(self, vts, sources_by_target):
    """Splits the global depfile per target and caches each versioned target's artifacts.

    For every target we cache its per-target depfile plus every class file its
    sources produced, as recorded in the (freshly split) dependency analysis.
    """
    self._ensure_depfile_tmpdir()
    target_to_vt = {v.target: v for v in vts.versioned_targets}

    # This work can happen in the background, if there's a measurable benefit to that.

    # Carve the monolithic depfile into one depfile per target.
    split_specs = []
    for tgt, srcs in sources_by_target.items():
      split_specs.append((srcs, JavaCompile.create_depfile_path(self._depfile_tmpdir, [tgt])))
    merged_deps = Dependencies(self._classes_dir)
    if os.path.exists(self._depfile):
      merged_deps.load(self._depfile)
    merged_deps.split(split_specs)

    # Pair each versioned target with its depfile and the classes its sources produced.
    pairs = []
    for tgt, srcs in sources_by_target.items():
      files = [JavaCompile.create_depfile_path(self._depfile_tmpdir, [tgt])]
      for src in srcs:
        for cls in merged_deps.classes_by_source.get(src, []):
          files.append(os.path.join(self._classes_dir, cls))
      vt = target_to_vt.get(tgt)
      if vt is not None:
        pairs.append((vt, files))

    # Write to the artifact cache.
    self.update_artifact_cache(pairs)
开发者ID:bollwang,项目名称:commons,代码行数:27,代码来源:java_compile.py

示例2: execute_single_compilation

  def execute_single_compilation(self, vt, cp):
    """Compiles one partition (a VersionedTargetSet) of java targets.

    Merges any previously-built per-target depfiles, invokes the compiler on the
    union of the partition's sources, splits the resulting depfile back out per
    target, and (optionally) writes per-target and whole-partition artifacts to
    the artifact cache.
    """
    depfile = self.create_depfile_path(vt.targets)

    self.merge_depfile(vt)  # Get what we can from previous builds.
    sources_by_target, fingerprint = self.calculate_sources(vt.targets)
    if sources_by_target:
      # Union all sources across the partition's targets into one compile batch.
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                              '\n  '.join(str(t) for t in sources_by_target.keys()))
      else:
        # Restrict the classpath to the configured confs.
        classpath = [jar for conf, jar in cp if conf in self._confs]
        result = self.compile(classpath, sources, fingerprint, depfile)
        if result != 0:
          # Map known jmake exit codes to friendly messages; fall back to the raw code.
          default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
          raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
        self.split_depfile(vt)

      all_artifact_files = [depfile]

      if self._artifact_cache and self.context.options.write_to_artifact_cache:
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        vts_artifactfile_pairs = []
        for single_vt in vt.versioned_targets:
          per_target_depfile = self.create_depfile_path([single_vt.target])
          per_target_artifact_files = [per_target_depfile]
          # findclasses is queried per single target, so this outer loop runs at most once.
          for _, classes_by_source in deps.findclasses([single_vt.target]).items():
            for _, classes in classes_by_source.items():
              classfile_paths = [os.path.join(self._classes_dir, cls) for cls in classes]
              per_target_artifact_files.extend(classfile_paths)
              all_artifact_files.extend(classfile_paths)
            # NOTE(review): this append sits inside the findclasses loop, so a target whose
            # depfile records no classes never gets paired with its depfile — confirm intended.
            vts_artifactfile_pairs.append((single_vt, per_target_artifact_files))
        # Also cache the whole partition's combined artifact.
        vts_artifactfile_pairs.append((vt, all_artifact_files))
        self.update_artifact_cache(vts_artifactfile_pairs)
开发者ID:wickman,项目名称:commons,代码行数:35,代码来源:java_compile.py

示例3: _post_process

  def _post_process(self, target, cp):
    """Must be called on all targets, whether they needed compilation or not.

    Prepends this target's classes dir to the classpath and, when the 'classes'
    product is required, registers the generated classes (and any scalac plugin
    info file) in the product genmap.
    """
    classes_dir, depfile, _ = self._output_paths([target])

    # Update the classpath, for the benefit of tasks downstream from us.
    if os.path.exists(classes_dir):
      for conf in self._confs:
        cp.insert(0, (conf, classes_dir))

    # Make note of the classes generated by this target.
    if os.path.exists(depfile) and self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(classes_dir)
      deps.load(depfile)
      genmap = self.context.products.get('classes')
      for classes_by_source in deps.findclasses([target]).values():
        for source, classes in classes_by_source.items():
          # Map both the source file and the owning target to the classes.
          genmap.add(source, classes_dir, classes)
          genmap.add(target, classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # Create and Map scala plugin info files to the owning targets.
        # NOTE(review): this plugin-info block is nested inside the findclasses loop,
        # so it only runs when the depfile has entries for this target — confirm that
        # it wasn't meant to sit outside the loop.
        if is_scalac_plugin(target) and target.classname:
          basedir, plugin_info_file = self._zinc_utils.write_plugin_info(self._resources_dir, target)
          genmap.add(target, basedir, [plugin_info_file])
开发者ID:bag-of-projects,项目名称:commons,代码行数:25,代码来源:scala_compile.py

示例4: execute

  def execute(self, targets):
    """Compiles all java targets in the context, partitioned for efficiency.

    Invalid partitions are compiled via execute_single_compilation; then, unless
    this is a dry run, the per-partition depfiles are merged into the global deps,
    the 'classes' product genmap is populated, and the apt processor service info
    file is (re)written.
    """
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      # Register our output dirs on the exclusives group's classpath for downstream tasks.
      egroups = self.context.products.get_data('exclusives_groups')
      group_id = egroups.get_group_key_for_target(java_targets[0])
      for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])


      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          exclusives_classpath = egroups.get_classpath_for_group(group_id)
          self.execute_single_compilation(vt, exclusives_classpath)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          # Preserve processors recorded by earlier rounds.
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
开发者ID:BabyDuncan,项目名称:commons,代码行数:60,代码来源:java_compile.py

示例5: post_process

 def post_process(self, versioned_targets):
   """Folds the deps recorded for these targets into the global dependency state.

   No-op on dry runs or when no depfile exists for the target set.
   """
   depfile = self.create_depfile_path(versioned_targets.targets)
   if self.dry_run or not os.path.exists(depfile):
     return
   # Read in the deps created either just now or by a previous compiler run on these targets.
   loaded = Dependencies(self._classes_dir)
   loaded.load(depfile)
   self.split_depfile(loaded, versioned_targets)
   self._deps.merge(loaded)
开发者ID:samitny,项目名称:commons,代码行数:8,代码来源:java_compile.py

示例6: split_artifact

  def split_artifact(self, deps, versioned_target_set):
    """Splits a multi-target artifact into per-target output dirs, depfiles and analysis caches.

    Class files are hard-linked from the combined output dir into each target's own
    dir, per-target depfiles are saved, and zinc is invoked to split and then rebase
    the analysis caches. No-op for single-target sets.
    """
    if len(versioned_target_set.targets) <= 1:
      return
    buildroot = get_buildroot()
    classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
    src_output_dir, _, src_analysis_cache = self.create_output_paths(versioned_target_set.targets)
    analysis_splits = []  # List of triples of (list of sources, destination output dir, destination analysis cache).
    # for dependency analysis, we need to record the cache files that we create in the split

    for target in versioned_target_set.targets:
      classes_by_source = classes_by_source_by_target.get(target, {})
      dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths([target])
      # Start each target's output dir from scratch.
      safe_rmtree(dst_output_dir)
      safe_mkdir(dst_output_dir)

      sources = []
      dst_deps = Dependencies(dst_output_dir)

      for source, classes in classes_by_source.items():
        src = os.path.join(target.target_base, source)
        dst_deps.add(src, classes)
        source_abspath = os.path.join(buildroot, target.target_base, source)
        sources.append(source_abspath)
        for cls in classes:
          # Copy the class file.
          dst = os.path.join(dst_output_dir, cls)
          safe_mkdir(os.path.dirname(dst))
          # Hard-link rather than copy to avoid duplicating class file bytes.
          os.link(os.path.join(src_output_dir, cls), dst)
      dst_deps.save(dst_depfile)
      analysis_splits.append((sources, dst_output_dir, dst_analysis_cache))
      self.generated_caches.add(os.path.join(dst_output_dir, dst_analysis_cache))
    # Use zinc to split the analysis files.
    if os.path.exists(src_analysis_cache):
      analysis_args = []
      analysis_args.extend(self._zinc_jar_args)
      analysis_args.extend([
        '-log-level', self.context.options.log_level or 'info',
        '-analysis',
        '-mirror-analysis'
        ])
      split_args = analysis_args + [
        '-cache', src_analysis_cache,
        '-split', ','.join(['{%s}:%s' % (':'.join(x[0]), x[2]) for x in analysis_splits]),
        ]
      # runjava returns a non-zero/truthy value on failure.
      if self.runjava(self._main, classpath=self._zinc_classpath, args=split_args, jvmargs=self._jvm_args):
        raise TaskError, 'zinc failed to split analysis files %s from %s' %\
                         (':'.join([x[2] for x in analysis_splits]), src_analysis_cache)

      # Now rebase the newly created analysis files.
      for split in analysis_splits:
        dst_analysis_cache = split[2]
        if os.path.exists(dst_analysis_cache):
          rebase_args = analysis_args + [
            '-cache', dst_analysis_cache,
            '-rebase', '%s:%s' % (src_output_dir, split[1]),
            ]
          if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
            raise TaskError, 'In split_artifact: zinc failed to rebase analysis file %s' % dst_analysis_cache
开发者ID:SeungEun,项目名称:commons,代码行数:58,代码来源:scala_compile.py

示例7: _split_artifact

  def _split_artifact(self, deps, versioned_target_set):
    """Splits an artifact representing several targets into target-by-target artifacts.
    Creates an output classes dir, a depfile and an analysis file for each target.
    Note that it's not OK to create incomplete artifacts here: this is run *after* a zinc invocation,
    and the expectation is that the result is complete.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
      return
    classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
    src_classes_dir, _, src_analysis_file = self._output_paths(versioned_target_set.targets)

    # Specifies that the list of sources defines a split to the classes dir and analysis file.
    SplitInfo = namedtuple('SplitInfo', ['sources', 'dst_classes_dir', 'dst_analysis_file'])

    analysis_splits = []  # List of SplitInfos.
    portable_analysis_splits = []  # The same, for the portable version of the analysis cache.

    # Prepare the split arguments.
    for target in versioned_target_set.targets:
      classes_by_source = classes_by_source_by_target.get(target, {})
      dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths([target])
      # Rebuild each target's classes dir from scratch.
      safe_rmtree(dst_classes_dir)
      safe_mkdir(dst_classes_dir)

      sources = []
      dst_deps = Dependencies(dst_classes_dir)

      for source, classes in classes_by_source.items():
        src = os.path.join(target.target_base, source)
        dst_deps.add(src, classes)
        sources.append(os.path.join(target.target_base, source))
        for cls in classes:
          # Copy the class file.
          dst = os.path.join(dst_classes_dir, cls)
          safe_mkdir(os.path.dirname(dst))
          # Hard-link to avoid duplicating class file bytes.
          os.link(os.path.join(src_classes_dir, cls), dst)
      dst_deps.save(dst_depfile)
      analysis_splits.append(SplitInfo(sources, dst_classes_dir, dst_analysis_file))
      portable_analysis_splits.append(SplitInfo(sources, dst_classes_dir, _portable(dst_analysis_file)))

    # Splits src_analysis_file per target, then rebases each piece onto its classes dir.
    def do_split(src_analysis_file, splits):
      if os.path.exists(src_analysis_file):
        # run_zinc_* return a truthy value on failure.
        if self._zinc_utils.run_zinc_split(src_analysis_file, [(x.sources, x.dst_analysis_file) for x in splits]):
          raise TaskError, 'zinc failed to split analysis files %s from %s' %\
                           (':'.join([x.dst_analysis_file for x in splits]), src_analysis_file)
        for split in splits:
          if os.path.exists(split.dst_analysis_file):
            if self._zinc_utils.run_zinc_rebase(split.dst_analysis_file,
                                                [(src_classes_dir, split.dst_classes_dir)]):
              raise TaskError, \
                'In split_artifact: zinc failed to rebase analysis file %s' % split.dst_analysis_file

    # Now rebase the newly created analysis file(s) to reflect the split classes dirs.
    do_split(src_analysis_file, analysis_splits)
    do_split(_portable(src_analysis_file), portable_analysis_splits)
开发者ID:bag-of-projects,项目名称:commons,代码行数:57,代码来源:scala_compile.py

示例8: _compute_classes_by_source

  def _compute_classes_by_source(self, depfile=None):
    """Compute src->classes."""
    if depfile is None:
      depfile = self._depfile

    if not os.path.exists(depfile):
      return {}
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    return deps.classes_by_source
开发者ID:bollwang,项目名称:commons,代码行数:10,代码来源:java_compile.py

示例9: _compile

  def _compile(self, versioned_target_set, classpath, upstream_analysis_files):
    """Actually compile some targets.

    May be invoked concurrently on independent target sets.

    Postcondition: The individual targets in versioned_target_set are up-to-date, as if each
                   were compiled individually.
    """
    # Note: We actually compile all the targets in the set in a single zinc call, because
    # compiler invocation overhead is high, but this fact is not exposed outside this method.
    classes_dir, depfile, analysis_file = self._output_paths(versioned_target_set.targets)
    safe_mkdir(classes_dir)

    # Get anything we have from previous builds.
    self._merge_artifact(versioned_target_set)

    # Compute the sources we need to compile.
    sources_by_target = ScalaCompile._calculate_sources(versioned_target_set.targets)

    if sources_by_target:
      # Union all sources across the set's targets into a single compile batch.
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                              '\n  '.join(str(t) for t in sources_by_target.keys()))
      else:
        # Invoke the compiler.
        self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
        # compile() returns a truthy value on failure.
        if self._zinc_utils.compile(classpath, sources, classes_dir, analysis_file,
                                    upstream_analysis_files, depfile):
          raise TaskError('Compile failed.')

        # Read in the deps we just created.
        self.context.log.debug('Reading dependencies from ' + depfile)
        deps = Dependencies(classes_dir)
        deps.load(depfile)

        # Split the artifact into per-target artifacts.
        self._split_artifact(deps, versioned_target_set)

        # Write to artifact cache, if needed.
        for vt in versioned_target_set.versioned_targets:
          vt_classes_dir, vt_depfile, vt_analysis_file = self._output_paths(vt.targets)
          vt_portable_analysis_file = _portable(vt_analysis_file)
          if self._artifact_cache and self.context.options.write_to_artifact_cache:
            # Relativize the analysis.
            # TODO: Relativize before splitting? This will require changes to Zinc, which currently
            # eliminates paths it doesn't recognize (including our placeholders) when splitting.
            if os.path.exists(vt_analysis_file) and \
                self._zinc_utils.relativize_analysis_file(vt_analysis_file, vt_portable_analysis_file):
              raise TaskError('Zinc failed to relativize analysis file: %s' % vt_analysis_file)
            # Write the per-target artifacts to the cache.
            artifacts = [vt_classes_dir, vt_depfile, vt_portable_analysis_file]
            self.update_artifact_cache(vt, artifacts)
          else:
            safe_rmtree(vt_portable_analysis_file)  # Don't leave cruft lying around.
开发者ID:bag-of-projects,项目名称:commons,代码行数:55,代码来源:scala_compile.py

示例10: execute

    def execute(self, targets):
        """Compiles all java targets, partitioned for efficiency.

        Invalid partitions are compiled and (optionally) written to the artifact
        cache; then the 'classes' product mappings and the apt processor service
        info file are produced. No-op for target sets with no java sources.
        """
        java_targets = [t for t in targets if t.has_sources(".java")]

        if not java_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Group guarantees that they'll be a single exclusives key for them.
        egroups = self.context.products.get_data("exclusives_groups")
        group_id = egroups.get_group_key_for_target(java_targets[0])

        # Add classes and resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
            egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        with self.invalidated(
            java_targets, invalidate_dependents=True, partition_size_hint=self._partition_size_hint
        ) as invalidation_check:
            if not self.dry_run:
                for vts in invalidation_check.invalid_vts_partitioned:
                    # Compile, using partitions for efficiency.
                    sources_by_target = self._process_target_partition(vts, cp)

                    # TODO: Check for missing dependencies.  See ScalaCompile for an example.
                    # Will require figuring out what the actual deps of a class file are.

                    vts.update()
                    if self.artifact_cache_writes_enabled():
                        self._write_to_artifact_cache(vts, sources_by_target)

                # Provide the target->class and source->class mappings to downstream tasks if needed.
                if self.context.products.isrequired("classes"):
                    if os.path.exists(self._depfile):
                        sources_by_target = self._compute_sources_by_target(java_targets)
                        deps = Dependencies(self._classes_dir)
                        deps.load(self._depfile)
                        self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

                # Produce a monolithic apt processor service info file for further compilation rounds
                # and the unit test classpath.
                all_processors = set()
                for target in java_targets:
                    if target.is_apt and target.processors:
                        all_processors.update(target.processors)
                processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                if os.path.exists(processor_info_file):
                    # Preserve processors recorded by earlier rounds.
                    with safe_open(processor_info_file, "r") as f:
                        for processor in f:
                            all_processors.add(processor.strip())
                self.write_processor_info(processor_info_file, all_processors)
开发者ID:dynamicguy,项目名称:commons,代码行数:54,代码来源:java_compile.py

示例11: _merge_artifact

  def _merge_artifact(self, versioned_target_set):
    """Merges artifacts representing the individual targets in a VersionedTargetSet into one artifact for that set.
    Creates an output classes dir, depfile and analysis file for the VersionedTargetSet.
    Note that the merged artifact may be incomplete (e.g., if we have no previous artifacts for some of the
    individual targets). That's OK: We run this right before we invoke zinc, which will fill in what's missing.
    This method is not required for correctness, only for efficiency: it can prevent zinc from doing superfluous work.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
      return  # Nothing to do.

    with temporary_dir() as tmpdir:
      dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths(versioned_target_set.targets)
      # Rebuild the merged classes dir from scratch.
      safe_rmtree(dst_classes_dir)
      safe_mkdir(dst_classes_dir)
      src_analysis_files = []

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_classes_dir)

      for target in versioned_target_set.targets:
        src_classes_dir, src_depfile, src_analysis_file = self._output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_classes_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_classes_dir, cls)
              dst = os.path.join(dst_classes_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Rebase a copy of the per-target analysis files to reflect the merged classes dir.
          if os.path.exists(src_analysis_file):
            src_analysis_file_tmp = \
            os.path.join(tmpdir, os.path.relpath(src_analysis_file, self._analysis_files_base))
            shutil.copyfile(src_analysis_file, src_analysis_file_tmp)
            src_analysis_files.append(src_analysis_file_tmp)
            # A rebase failure is non-fatal: worst case is a full rebuild.
            if self._zinc_utils.run_zinc_rebase(src_analysis_file_tmp, [(src_classes_dir, dst_classes_dir)]):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '\
                                    'Target may require a full rebuild.' %\
                                    src_analysis_file_tmp)

      dst_deps.save(dst_depfile)

      # A merge failure is likewise non-fatal.
      if self._zinc_utils.run_zinc_merge(src_analysis_files, dst_analysis_file):
        self.context.log.warn('zinc failed to merge analysis files %s to %s. '\
                              'Target may require a full rebuild.' %\
                             (':'.join(src_analysis_files), dst_analysis_file))
开发者ID:bag-of-projects,项目名称:commons,代码行数:59,代码来源:scala_compile.py

示例12: split_depfile

 def split_depfile(self, deps, versioned_target_set):
   """Writes one depfile per target out of the combined deps of a multi-target set."""
   targets = versioned_target_set.targets
   # A single-target set already has a per-target depfile; nothing to split.
   if len(targets) <= 1:
     return
   per_target_classes = deps.findclasses(targets)
   for tgt in targets:
     dst_path = self.create_depfile_path([tgt])
     split_deps = Dependencies(self._classes_dir)
     for src_rel, classes in per_target_classes.get(tgt, {}).items():
       split_deps.add(os.path.join(tgt.target_base, src_rel), classes)
     split_deps.save(dst_path)
开发者ID:samitny,项目名称:commons,代码行数:12,代码来源:java_compile.py

示例13: post_process_cached_vts

 def post_process_cached_vts(cached_vts):
   """Merges the per-target depfiles fetched from the cache into the global depfile."""
   if not cached_vts:
     return
   with self.context.new_workunit(name='merge-dependencies'):
     merged = Dependencies(self._classes_dir)
     # Seed with the existing global deps, if any.
     if os.path.exists(self._depfile):
       merged.load(self._depfile)
     for cached_vt in cached_vts:
       for tgt in cached_vt.targets:
         tgt_depfile = JavaCompile.create_depfile_path(self._depfile_tmpdir, [tgt])
         if os.path.exists(tgt_depfile):
           fetched = Dependencies(self._classes_dir)
           fetched.load(tgt_depfile)
           merged.merge(fetched)
     merged.save(self._depfile)
开发者ID:bollwang,项目名称:commons,代码行数:15,代码来源:java_compile.py

示例14: __init__

    def __init__(self, context):
        """Reads java-compile configuration: output dirs, compiler profiles, args, artifact cache."""
        NailgunTask.__init__(self, context, workdir=context.config.get("java-compile", "nailgun_dir"))

        # A command-line hint of -1 means "defer to the config file".
        size_hint = context.options.java_compile_partition_size_hint
        if size_hint == -1:
            size_hint = context.config.getint("java-compile", "partition_size_hint")
        self._partition_size_hint = size_hint

        base_dir = context.config.get("java-compile", "workdir")
        self._classes_dir = os.path.join(base_dir, "classes")
        self._resources_dir = os.path.join(base_dir, "resources")
        self._depfile_dir = os.path.join(base_dir, "depfiles")
        self._deps = Dependencies(self._classes_dir)

        self._jmake_profile = context.config.get("java-compile", "jmake-profile")
        self._compiler_profile = context.config.get("java-compile", "compiler-profile")

        self._args = context.config.getlist("java-compile", "args")
        self._jvm_args = context.config.getlist("java-compile", "jvm_args")

        # Warnings are opt-in via the command line.
        warning_key = "warning_args" if context.options.java_compile_warnings else "no_warning_args"
        self._args.extend(context.config.getlist("java-compile", warning_key))

        self._confs = context.config.getlist("java-compile", "confs")

        # The artifact cache to read from/write to.
        self.setup_artifact_cache(context.config.getlist("java-compile", "artifact_caches"))
开发者ID:ewhauser,项目名称:commons,代码行数:31,代码来源:java_compile.py

示例15: __init__

  def __init__(self, context):
    """Initializes output dirs, compiler profiles, compile args and the artifact cache
    from the java-compile section of the pants config.
    """
    NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))

    hint = context.options.java_compile_partition_size_hint
    # A hint of -1 on the command line means "use the config default".
    self._partition_size_hint = (
      context.config.getint('java-compile', 'partition_size_hint') if hint == -1 else hint)

    outdir = context.config.get('java-compile', 'workdir')
    self._classes_dir = os.path.join(outdir, 'classes')
    self._resources_dir = os.path.join(outdir, 'resources')
    self._depfile_dir = os.path.join(outdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)

    self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
    self._compiler_profile = context.config.get('java-compile', 'compiler-profile')

    self._args = context.config.getlist('java-compile', 'args')
    self._jvm_args = context.config.getlist('java-compile', 'jvm_args')

    # Warnings are opt-in via the command line.
    if context.options.java_compile_warnings:
      warning_key = 'warning_args'
    else:
      warning_key = 'no_warning_args'
    self._args.extend(context.config.getlist('java-compile', warning_key))

    self._confs = context.config.getlist('java-compile', 'confs')

    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)
开发者ID:samitny,项目名称:commons,代码行数:30,代码来源:java_compile.py


注:本文中的twitter.pants.tasks.jvm_compiler_dependencies.Dependencies类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。