本文整理汇总了Python中twitter.pants.tasks.jvm_compiler_dependencies.Dependencies.merge方法的典型用法代码示例。如果您正苦于以下问题:Python Dependencies.merge方法的具体用法?Python Dependencies.merge怎么用?Python Dependencies.merge使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类twitter.pants.tasks.jvm_compiler_dependencies.Dependencies的用法示例。
在下文中一共展示了Dependencies.merge方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _merge_artifact
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
def _merge_artifact(self, versioned_target_set):
    """Merges artifacts representing the individual targets in a VersionedTargetSet into one artifact for that set.

    Creates an output classes dir, depfile and analysis file for the VersionedTargetSet.
    Note that the merged artifact may be incomplete (e.g., if we have no previous artifacts for some of the
    individual targets). That's OK: We run this right before we invoke zinc, which will fill in what's missing.
    This method is not required for correctness, only for efficiency: it can prevent zinc from doing superfluous work.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # Nothing to do.
    # Scratch dir for the rebased analysis copies, cleaned up automatically on exit.
    with temporary_dir() as tmpdir:
        dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths(versioned_target_set.targets)
        # Start the merged classes dir from scratch so stale files from earlier merges can't leak in.
        safe_rmtree(dst_classes_dir)
        safe_mkdir(dst_classes_dir)
        src_analysis_files = []
        # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
        dst_deps = Dependencies(dst_classes_dir)
        for target in versioned_target_set.targets:
            src_classes_dir, src_depfile, src_analysis_file = self._output_paths([target])
            if os.path.exists(src_depfile):
                src_deps = Dependencies(src_classes_dir)
                src_deps.load(src_depfile)
                dst_deps.merge(src_deps)
                classes_by_source = src_deps.findclasses([target]).get(target, {})
                for source, classes in classes_by_source.items():
                    for cls in classes:
                        src = os.path.join(src_classes_dir, cls)
                        dst = os.path.join(dst_classes_dir, cls)
                        # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                        # it's missing and rebuild it.
                        # dst may already exist if we have overlapping targets. It's not a good idea
                        # to have those, but until we enforce it, we must allow it here.
                        if os.path.exists(src) and not os.path.exists(dst):
                            # Copy the class file.
                            safe_mkdir(os.path.dirname(dst))
                            # Hard link rather than copy: cheap, and content is identical by construction.
                            os.link(src, dst)
            # Rebase a copy of the per-target analysis files to reflect the merged classes dir.
            if os.path.exists(src_analysis_file):
                src_analysis_file_tmp = \
                    os.path.join(tmpdir, os.path.relpath(src_analysis_file, self._analysis_files_base))
                shutil.copyfile(src_analysis_file, src_analysis_file_tmp)
                src_analysis_files.append(src_analysis_file_tmp)
                # A non-zero return from run_zinc_rebase signals failure; non-fatal because a full
                # rebuild will regenerate a correct analysis.
                if self._zinc_utils.run_zinc_rebase(src_analysis_file_tmp, [(src_classes_dir, dst_classes_dir)]):
                    self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '\
                                          'Target may require a full rebuild.' %\
                                          src_analysis_file_tmp)
        dst_deps.save(dst_depfile)
        # Merge the (rebased) per-target analyses into a single analysis for the whole set.
        if self._zinc_utils.run_zinc_merge(src_analysis_files, dst_analysis_file):
            self.context.log.warn('zinc failed to merge analysis files %s to %s. '\
                                  'Target may require a full rebuild.' %\
                                  (':'.join(src_analysis_files), dst_analysis_file))
示例2: merge_artifact
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
def merge_artifact(self, versioned_target_set):
    """Merge the per-target output dirs, depfiles and zinc analysis caches of a VersionedTargetSet
    into a single combined artifact for the whole set.

    Efficiency-only: a missing or partial merge just means zinc redoes some work.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # A singleton set already is its own artifact.
    # Scratch dir for rebased analysis-cache copies, auto-cleaned on exit.
    with temporary_dir() as tmpdir:
        dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
        # Rebuild the merged output dir from scratch.
        safe_rmtree(dst_output_dir)
        safe_mkdir(dst_output_dir)
        src_analysis_caches = []
        # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
        dst_deps = Dependencies(dst_output_dir)
        for target in versioned_target_set.targets:
            src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
            if os.path.exists(src_depfile):
                src_deps = Dependencies(src_output_dir)
                src_deps.load(src_depfile)
                dst_deps.merge(src_deps)
                classes_by_source = src_deps.findclasses([target]).get(target, {})
                for source, classes in classes_by_source.items():
                    for cls in classes:
                        src = os.path.join(src_output_dir, cls)
                        dst = os.path.join(dst_output_dir, cls)
                        # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                        # it's missing and rebuild it.
                        # dst may already exist if we have overlapping targets. It's not a good idea
                        # to have those, but until we enforce it, we must allow it here.
                        if os.path.exists(src) and not os.path.exists(dst):
                            # Copy the class file.
                            safe_mkdir(os.path.dirname(dst))
                            # Hard link: cheap, and the content is identical by construction.
                            os.link(src, dst)
            # Rebase a copy of the per-target analysis files prior to merging.
            if os.path.exists(src_analysis_cache):
                src_analysis_cache_tmp = \
                    os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
                shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
                src_analysis_caches.append(src_analysis_cache_tmp)
                # Truthy (non-zero) return means the rebase failed; non-fatal, zinc will rebuild.
                if self._zinc_utils.run_zinc_rebase(cache=src_analysis_cache_tmp, rebasings=[(src_output_dir, dst_output_dir)]):
                    self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                                          'Target may require a full rebuild.' % \
                                          src_analysis_cache_tmp)
        dst_deps.save(dst_depfile)
        # Combine the rebased caches into one analysis cache for the whole set.
        if self._zinc_utils.run_zinc_merge(src_caches=src_analysis_caches, dst_cache=dst_analysis_cache):
            self.context.log.warn('zinc failed to merge analysis files %s to %s. ' \
                                  'Target may require a full rebuild.' % \
                                  (':'.join(src_analysis_caches), dst_analysis_cache))
示例3: merge_depfile
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
def merge_depfile(self, versioned_target_set):
    """Combine the per-target depfiles of a VersionedTargetSet into one depfile for the set.

    Targets that have no depfile yet (e.g., never built) are simply skipped; the merged
    depfile is written regardless, possibly empty.
    """
    targets = versioned_target_set.targets
    if len(targets) <= 1:
        return  # A singleton set already has its own depfile; nothing to combine.
    combined_depfile = self.create_depfile_path(targets)
    combined = Dependencies(self._classes_dir)
    for tgt in targets:
        per_target_depfile = self.create_depfile_path([tgt])
        if not os.path.exists(per_target_depfile):
            continue  # No prior build output for this target.
        loaded = Dependencies(self._classes_dir)
        loaded.load(per_target_depfile)
        combined.merge(loaded)
    combined.save(combined_depfile)
示例4: post_process_cached_vts
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
def post_process_cached_vts(cached_vts):
    # Closure over self: folds the per-target depfiles restored from the artifact
    # cache into the existing global depfile.
    if not cached_vts:
        return
    with self.context.new_workunit(name='merge-dependencies'):
        global_deps = Dependencies(self._classes_dir)
        if os.path.exists(self._depfile):
            global_deps.load(self._depfile)  # Seed with whatever we already know.
        for target in (t for vt in cached_vts for t in vt.targets):
            per_target_depfile = JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])
            if os.path.exists(per_target_depfile):
                restored = Dependencies(self._classes_dir)
                restored.load(per_target_depfile)
                global_deps.merge(restored)
        global_deps.save(self._depfile)
示例5: JavaCompile
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
class JavaCompile(NailgunTask):
@classmethod
def setup_parser(cls, option_group, args, mkflag):
    """Register java-compile command-line options (warnings toggle, extra javac args,
    partition size hint) on top of the base NailgunTask options."""
    NailgunTask.setup_parser(option_group, args, mkflag)
    # Paired positive/negated flag handled by mkflag's boolean callback.
    option_group.add_option(mkflag("warnings"), mkflag("warnings", negate=True),
                            dest="java_compile_warnings", default=True,
                            action="callback", callback=mkflag.set_bool,
                            help="[%default] Compile java code with all configured warnings "
                                 "enabled.")
    # May be repeated; each occurrence is appended.
    option_group.add_option(mkflag("args"), dest="java_compile_args", action="append",
                            help="Pass these extra args to javac.")
    # -1 sentinel means "not set on the command line"; __init__ falls back to pants.ini.
    option_group.add_option(mkflag("partition-size-hint"), dest="java_compile_partition_size_hint",
                            action="store", type="int", default=-1,
                            help="Roughly how many source files to attempt to compile together. Set"
                                 " to a large number to compile all sources together. Set this to 0"
                                 " to compile target-by-target. Default is set in pants.ini.")
def __init__(self, context):
    """Read all java-compile configuration from context.options / context.config
    and set up output dirs, compiler profiles and the artifact cache."""
    NailgunTask.__init__(self, context, workdir=context.config.get('java-compile', 'nailgun_dir'))
    # Command-line flag wins; -1 is the "unset" sentinel from setup_parser, in which
    # case we fall back to pants.ini (default 1000).
    if context.options.java_compile_partition_size_hint != -1:
        self._partition_size_hint = context.options.java_compile_partition_size_hint
    else:
        self._partition_size_hint = context.config.getint('java-compile', 'partition_size_hint',
                                                          default=1000)
    # All outputs live under the configured workdir.
    workdir = context.config.get('java-compile', 'workdir')
    self._classes_dir = os.path.join(workdir, 'classes')
    self._resources_dir = os.path.join(workdir, 'resources')
    self._depfile_dir = os.path.join(workdir, 'depfiles')
    self._deps = Dependencies(self._classes_dir)  # Global source->class dependency map.
    self._jmake_profile = context.config.get('java-compile', 'jmake-profile')
    self._compiler_profile = context.config.get('java-compile', 'compiler-profile')
    self._opts = context.config.getlist('java-compile', 'args')
    self._jvm_args = context.config.getlist('java-compile', 'jvm_args')
    # Explicit command-line javac args (shlex-split per occurrence) override config args.
    self._javac_opts = []
    if context.options.java_compile_args:
        for arg in context.options.java_compile_args:
            self._javac_opts.extend(shlex.split(arg))
    else:
        self._javac_opts.extend(context.config.getlist('java-compile', 'javac_args', default=[]))
    if context.options.java_compile_warnings:
        self._opts.extend(context.config.getlist('java-compile', 'warning_args'))
    else:
        self._opts.extend(context.config.getlist('java-compile', 'no_warning_args'))
    self._confs = context.config.getlist('java-compile', 'confs')
    # The artifact cache to read from/write to.
    artifact_cache_spec = context.config.getlist('java-compile', 'artifact_caches')
    self.setup_artifact_cache(artifact_cache_spec)
def product_type(self):
    """This task contributes 'classes' products to the product graph."""
    return 'classes'
def can_dry_run(self):
    """Dry runs are supported: execute() guards its side effects on self.dry_run."""
    return True
def execute(self, targets):
    """Compile all java targets among `targets`, partitioned for efficiency, and
    populate the 'classes' product map when required.

    NOTE(review): this excerpt appears truncated (the example site elided the rest of
    the method); only the visible portion is documented here.
    """
    # NOTE(review): Python 2-era code -- relies on filter() returning a list here
    # (elsewhere in this file `raise X, msg` syntax confirms Python 2).
    java_targets = filter(_is_java, targets)
    if java_targets:
        safe_mkdir(self._classes_dir)
        safe_mkdir(self._depfile_dir)
        # Prepend our outputs to the shared classpath so downstream tasks see them.
        with self.context.state('classpath', []) as cp:
            for conf in self._confs:
                cp.insert(0, (conf, self._resources_dir))
                cp.insert(0, (conf, self._classes_dir))
            with self.invalidated(java_targets, invalidate_dependents=True,
                                  partition_size_hint=self._partition_size_hint) as invalidation_check:
                for vt in invalidation_check.invalid_vts_partitioned:
                    # Compile, using partitions for efficiency.
                    self.execute_single_compilation(vt, cp)
                    if not self.dry_run:
                        vt.update()
                for vt in invalidation_check.all_vts:
                    depfile = self.create_depfile_path(vt.targets)
                    if not self.dry_run and os.path.exists(depfile):
                        # Read in the deps created either just now or by a previous run on these targets.
                        deps = Dependencies(self._classes_dir)
                        deps.load(depfile)
                        self._deps.merge(deps)
        if not self.dry_run:
            if self.context.products.isrequired('classes'):
                genmap = self.context.products.get('classes')
                # Map generated classes to the owning targets and sources.
                for target, classes_by_source in self._deps.findclasses(java_targets).items():
                    for source, classes in classes_by_source.items():
                        genmap.add(source, self._classes_dir, classes)
                        genmap.add(target, self._classes_dir, classes)
#.........这里部分代码省略.........
示例6: merge_artifact
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
def merge_artifact(self, versioned_target_set):
    """Merge per-target output dirs, depfiles and zinc analysis caches into a single
    artifact for the set, invoking the zinc jar directly (runjava) for rebase/merge."""
    if len(versioned_target_set.targets) <= 1:
        return  # Nothing to merge for a singleton set.
    # Scratch dir for rebased analysis-cache copies, auto-cleaned on exit.
    with temporary_dir() as tmpdir:
        dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
        # Rebuild the merged output dir from scratch.
        safe_rmtree(dst_output_dir)
        safe_mkdir(dst_output_dir)
        src_analysis_caches = []
        # Common argument prefix for all zinc analysis invocations below.
        analysis_args = []
        analysis_args.extend(self._zinc_jar_args)
        analysis_args.extend([
            '-log-level', self.context.options.log_level or 'info',
            '-analysis',
        ])
        # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
        dst_deps = Dependencies(dst_output_dir)
        for target in versioned_target_set.targets:
            src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
            if os.path.exists(src_depfile):
                src_deps = Dependencies(src_output_dir)
                src_deps.load(src_depfile)
                dst_deps.merge(src_deps)
                classes_by_source = src_deps.findclasses([target]).get(target, {})
                for source, classes in classes_by_source.items():
                    for cls in classes:
                        src = os.path.join(src_output_dir, cls)
                        dst = os.path.join(dst_output_dir, cls)
                        # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                        # it's missing and rebuild it.
                        # dst may already exist if we have overlapping targets. It's not a good idea
                        # to have those, but until we enforce it, we must allow it here.
                        if os.path.exists(src) and not os.path.exists(dst):
                            # Copy the class file.
                            safe_mkdir(os.path.dirname(dst))
                            # Hard link: cheap, identical content by construction.
                            os.link(src, dst)
            # Use zinc to rebase a copy of the per-target analysis files prior to merging.
            if os.path.exists(src_analysis_cache):
                src_analysis_cache_tmp = \
                    os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
                shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
                src_analysis_caches.append(src_analysis_cache_tmp)
                rebase_args = analysis_args + [
                    '-cache', src_analysis_cache_tmp,
                    '-rebase', '%s:%s' % (src_output_dir, dst_output_dir),
                ]
                # Non-zero exit from zinc means the rebase failed; non-fatal.
                if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
                    self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                                          'Target may require a full rebuild.' % src_analysis_cache_tmp)
        dst_deps.save(dst_depfile)
        # Use zinc to merge the analysis files.
        merge_args = analysis_args + [
            '-cache', dst_analysis_cache,
            '-merge', ':'.join(src_analysis_caches),
        ]
        # NOTE(review): Python 2 raise syntax below -- this file predates Python 3.
        # Unlike rebase failures above, a failed merge is fatal here.
        if self.runjava(self._main, classpath=self._zinc_classpath, args=merge_args, jvmargs=self._jvm_args):
            raise TaskError, 'zinc failed to merge analysis files %s to %s' % \
                             (':'.join(src_analysis_caches), dst_analysis_cache)
示例7: JavaCompile
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
#.........这里部分代码省略.........
with self.invalidated(java_targets, invalidate_dependents=True,
partition_size_hint=self._partition_size_hint) as invalidation_check:
for vt in invalidation_check.all_vts:
if vt.valid: # Don't compile, just post-process.
self.post_process(vt)
for vt in invalidation_check.invalid_vts_partitioned:
# Compile, using partitions for efficiency.
self.execute_single_compilation(vt, cp)
if not self.dry_run:
vt.update()
if not self.dry_run:
if self.context.products.isrequired('classes'):
genmap = self.context.products.get('classes')
# Map generated classes to the owning targets and sources.
for target, classes_by_source in self._deps.findclasses(java_targets).items():
for source, classes in classes_by_source.items():
genmap.add(source, self._classes_dir, classes)
genmap.add(target, self._classes_dir, classes)
# TODO(John Sirois): Map target.resources in the same way
# 'Map' (rewrite) annotation processor service info files to the owning targets.
for target in java_targets:
if is_apt(target) and target.processors:
basedir = os.path.join(self._resources_dir, target.id)
processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
self.write_processor_info(processor_info_file, target.processors)
genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
def execute_single_compilation(self, vt, cp):
    """Compile one partition (VersionedTarget `vt`) against classpath pairs `cp`.

    NOTE(review): the tail of this method (presumably artifact-cache writing that
    consumes `artifact_files`) appears elided by the example site.
    """
    depfile = self.create_depfile_path(vt.targets)
    self.merge_depfile(vt)  # Get what we can from previous builds.
    self.context.log.info('Compiling targets %s' % str(vt.targets))
    sources_by_target, processors, fingerprint = self.calculate_sources(vt.targets)
    if sources_by_target:
        # Union all per-target source sets into one compilation batch.
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
            self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                  '\n '.join(str(t) for t in sources_by_target.keys()))
        else:
            # Only classpath entries for the configured confs participate.
            classpath = [jar for conf, jar in cp if conf in self._confs]
            result = self.compile(classpath, sources, fingerprint, depfile)
            if result != 0:
                default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
            # NOTE: Currently all classfiles go into one global classes_dir. If we compile in
            # multiple partitions the second one will cache all the classes of the first one.
            # This won't result in error, but is wasteful. Currently, however, Java compilation
            # is done in a single pass, so this won't occur in practice.
            # TODO: Handle this case better. Separate classes dirs for each partition, like for scala?
            artifact_files = [self._classes_dir, depfile]
            if processors and not self.dry_run:
                # Produce a monolithic apt processor service info file for further compilation rounds
                # and the unit test classpath.
                processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                if os.path.exists(processor_info_file):
                    # Merge in processors recorded by earlier rounds.
                    with safe_open(processor_info_file, 'r') as f:
                        for processor in f:
                            processors.add(processor.strip())
                self.write_processor_info(processor_info_file, processors)
                artifact_files.append(processor_info_file)
示例8: ScalaCompile
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
#.........这里部分代码省略.........
invalidate_globally = True
else:
invalidate_globally = False
with self.changed(scala_targets, invalidate_dependants=True,
invalidate_globally=invalidate_globally) as changed_targets:
sources_by_target = self.calculate_sources(changed_targets)
if sources_by_target:
sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
if not sources:
self.context.log.warn('Skipping scala compile for targets with no sources:\n %s' %
'\n '.join(str(t) for t in sources_by_target.keys()))
else:
classpath = [jar for conf, jar in cp if conf in self._confs]
result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
if result != 0:
raise TaskError('%s returned %d' % (self._main, result))
if output_dir != self._classes_dir:
# Link class files emitted in this compilation into the central classes dir.
for (dirpath, dirnames, filenames) in os.walk(output_dir):
for d in [os.path.join(dirpath, x) for x in dirnames]:
dir = os.path.join(self._classes_dir, os.path.relpath(d, output_dir))
if not os.path.isdir(dir):
os.mkdir(dir)
for f in [os.path.join(dirpath, x) for x in filenames]:
outfile = os.path.join(self._classes_dir, os.path.relpath(f, output_dir))
if os.path.exists(outfile):
os.unlink(outfile)
os.link(f, outfile)
# Read in the deps created either just now or by a previous compiler run on these targets.
self.context.log.debug('Reading dependencies from ' + depfile)
deps = Dependencies(output_dir)
deps.load(depfile)
self._deps.merge(deps)
if not self._flatten:
upstream_analysis_caches[output_dir] = analysis_cache
def calculate_sources(self, targets):
    """Map each target to the set of its scala source paths.

    For ScalaLibrary/ScalaTests targets that declare java_sources, those java sources
    are included too (they are compiled together by zinc).

    Returns a defaultdict(set): target -> set of source paths (relative to target_base
    join semantics of os.path.join). Targets with no matching sources get no entry.
    """
    sources = defaultdict(set)

    def collect_sources(target):
        # BUG FIX: the original built a generator here and guarded on `if src:`.
        # A generator object is always truthy, so the guard was dead and targets with
        # no scala sources still got (empty) entries. Materializing as a list makes
        # the guard filter empty source sets, as evidently intended.
        src = [os.path.join(target.target_base, source)
               for source in target.sources if source.endswith('.scala')]
        if src:
            sources[target].update(src)
        # Kept unconditional: because the original guard was always true, this check
        # always ran regardless of whether the target had scala sources.
        if (isinstance(target, ScalaLibrary) or isinstance(target, ScalaTests)) and (
            target.java_sources):
            sources[target].update(resolve_target_sources(target.java_sources, '.java'))

    for target in targets:
        collect_sources(target)
    return sources
def compile(self, classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile):
safe_mkdir(output_dir)
compiler_classpath = nailgun_profile_classpath(self, self._compile_profile)
compiler_args = []
# TODO(John Sirois): separate compiler profile from runtime profile
compiler_args.extend([
# Support for outputting a dependencies file of source -> class
'-Xplugin:%s' % self.get_depemitter_plugin(),
'-P:depemitter:file:%s' % depfile
示例9: JavaCompile
# 需要导入模块: from twitter.pants.tasks.jvm_compiler_dependencies import Dependencies [as 别名]
# 或者: from twitter.pants.tasks.jvm_compiler_dependencies.Dependencies import merge [as 别名]
#.........这里部分代码省略.........
genmap.add(source, self._classes_dir, classes)
genmap.add(target, self._classes_dir, classes)
# TODO(John Sirois): Map target.resources in the same way
# 'Map' (rewrite) annotation processor service info files to the owning targets.
for target in java_targets:
if is_apt(target) and target.processors:
basedir = os.path.join(self._resources_dir, target.id)
processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
self.write_processor_info(processor_info_file, target.processors)
genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
def execute_single_compilation(self, java_targets, cp):
    """Compile the given java targets as one batch against classpath pairs `cp`,
    then fold the resulting depfile into the global dependency map."""
    self.context.log.info('Compiling targets %s' % str(java_targets))
    # Compute the id of this compilation. We try to make it human-readable.
    if len(java_targets) == 1:
        compilation_id = java_targets[0].id
    else:
        compilation_id = self.context.identify(java_targets)
    if self._flatten:
        # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
        # allows us to build different targets in different invocations without losing dependency information
        # from any of them.
        depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
        # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
        # compilation will read in the entire depfile, add its stuff to it and write it out again).
        depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    with self.changed(java_targets, invalidate_dependants=True) as changed:
        sources_by_target, processors, fingerprint = self.calculate_sources(changed)
        if sources_by_target:
            # Union all per-target source sets into one compilation batch.
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
                self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                      '\n '.join(str(t) for t in sources_by_target.keys()))
            else:
                # Only classpath entries for the configured confs participate.
                classpath = [jar for conf, jar in cp if conf in self._confs]
                result = self.compile(classpath, sources, fingerprint, depfile)
                if result != 0:
                    default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                    raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
                if processors:
                    # Produce a monolithic apt processor service info file for further compilation rounds
                    # and the unit test classpath.
                    processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                    if os.path.exists(processor_info_file):
                        # Merge in processors recorded by earlier rounds.
                        with safe_open(processor_info_file, 'r') as f:
                            for processor in f:
                                processors.add(processor.strip())
                    self.write_processor_info(processor_info_file, processors)
                # Read in the deps created either just now or by a previous compiler run on these targets.
                deps = Dependencies(self._classes_dir)
                deps.load(depfile)
                self._deps.merge(deps)
def calculate_sources(self, targets):
    """Compute the java sources, apt processors and fingerprint for `targets`.

    Returns a tuple (sources_by_target, processors, fingerprint):
      - sources_by_target: defaultdict(set) mapping target -> java source paths;
        targets with no java sources get no entry.
      - processors: set of annotation-processor names declared by apt targets.
      - fingerprint: an id for this target set, from self.context.identify.
    """
    sources = defaultdict(set)
    processors = set()

    def collect_sources(target):
        # BUG FIX: the original built a generator here and guarded on `if src:`.
        # A generator object is always truthy, so the guard was dead and targets with
        # no java sources still got (empty) entries. Materializing as a list makes
        # the guard filter empty source sets, as evidently intended.
        src = [os.path.join(target.target_base, source)
               for source in target.sources if source.endswith('.java')]
        if src:
            sources[target].update(src)
        # Kept unconditional: because the original guard was always true, processor
        # collection always ran, even for apt targets with no java sources.
        if is_apt(target) and target.processors:
            processors.update(target.processors)

    for target in targets:
        collect_sources(target)
    return sources, processors, self.context.identify(targets)
def compile(self, classpath, sources, fingerprint, depfile):
    """Invoke jmake (via nailgun) to compile `sources` against `classpath`.

    The fingerprint names the per-target-set jmake pdb; the compiler is instructed to
    emit source->class dependencies into `depfile`. Returns the jmake exit code.
    """
    jmake_classpath = nailgun_profile_classpath(self, self._jmake_profile)
    args = [
        '-classpath', ':'.join(classpath),
        '-d', self._classes_dir,
        # jmake's incremental-build database, keyed by the target-set fingerprint.
        '-pdb', os.path.join(self._classes_dir, '%s.dependencies.pdb' % fingerprint),
    ]
    compiler_classpath = nailgun_profile_classpath(self, self._compiler_profile)
    args.extend([
        # Route javac through Twitter's compiler wrapper so it can emit the depfile.
        '-jcpath', ':'.join(compiler_classpath),
        '-jcmainclass', 'com.twitter.common.tools.Compiler',
        '-C-Tdependencyfile', '-C%s' % depfile,
    ])
    args.extend(self._args)
    args.extend(sources)
    log.debug('Executing: %s %s' % (_JMAKE_MAIN, ' '.join(args)))
    return self.runjava(_JMAKE_MAIN, classpath=jmake_classpath, args=args, jvmargs=self._jvm_args)
def write_processor_info(self, processor_info_file, processors):
    """Write the annotation-processor service info file: one processor class name per line."""
    content = ''.join('%s\n' % processor for processor in processors)
    with safe_open(processor_info_file, 'w') as f:
        f.write(content)