This page collects typical usage examples of the ruffus.Pipeline.subdivide method in Python. If you are wondering how Pipeline.subdivide is used in practice, or are looking for concrete examples of calling it, the curated code samples below may help. You can also look further into usage examples of the containing class, ruffus.Pipeline.
Four code examples of Pipeline.subdivide are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
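Before turning to the examples, here is a minimal sketch of what a subdivide step looks like: one input file fans out into a variable number of output files matched by a glob pattern, and the task function itself decides how many files to create. The task name make_chunks, the paths under work/, and the one-chunk-per-line split logic are illustrative assumptions, not taken from the examples below.

from ruffus import Pipeline, formatter

def make_chunks(input_file, output_glob, output_prefix):
    # The task decides how many output files to create; they only
    # need to match the glob pattern supplied as `output` below.
    with open(input_file) as ii:
        for i, line in enumerate(ii):
            with open("%s_%d.chunk" % (output_prefix, i), "w") as oo:
                oo.write(line)

sketch_pipeline = Pipeline("sketch")
sketch_pipeline.subdivide(task_func=make_chunks,
                          input=["work/data.txt"],              # upstream files or tasks
                          filter=formatter(),                   # or suffix()/regex()
                          output="work/{basename[0]}_*.chunk",  # glob: one input, many outputs
                          extras=["work/{basename[0]}"])        # extra argument passed to the task
sketch_pipeline.run()

The four examples below follow the same pattern, differing mainly in whether positional or keyword arguments are used and in how the subdivide task is wired to upstream and downstream tasks.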
Example 1: test_newstyle_ruffus
# Required import: from ruffus import Pipeline [as alias]
# Or: from ruffus.Pipeline import subdivide [as alias]
def test_newstyle_ruffus(self):
    print(" Run pipeline normally...")
    test_pipeline = Pipeline("test")
    test_pipeline.originate(make_start, [tempdir + 'start'])
    test_pipeline.split(split_start, make_start, tempdir + '*.split')
    test_pipeline.subdivide(subdivide_start, split_start, formatter(),
                            tempdir + '{basename[0]}_*.subdivided',
                            tempdir + '{basename[0]}')
    if self.graph_viz_present:
        test_pipeline.printout_graph(tempdir + "flowchart.dot")
        test_pipeline.printout_graph(tempdir + "flowchart.jpg",
                                     target_tasks=[subdivide_start],
                                     forcedtorun_tasks=[split_start],
                                     no_key_legend=True)
        test_pipeline.printout_graph(tempdir + "flowchart.svg", no_key_legend=False)
        # Unknown format
        try:
            test_pipeline.printout_graph(tempdir + "flowchart.unknown", no_key_legend=False)
            raise Exception("Failed to throw exception for test_pipeline.printout_graph unknown extension ")
        except CalledProcessError as err:
            pass
        test_pipeline.printout_graph(tempdir + "flowchart.unknown", "svg", no_key_legend=False)
    else:
        test_pipeline.printout_graph(tempdir + "flowchart.dot",
                                     target_tasks=[subdivide_start],
                                     forcedtorun_tasks=[split_start],
                                     no_key_legend=True)
Example 2: test_newstyle_ruffus
# Required import: from ruffus import Pipeline [as alias]
# Or: from ruffus.Pipeline import subdivide [as alias]
def test_newstyle_ruffus(self):
    # alternative syntax
    test_pipeline = Pipeline("test")
    test_pipeline.mkdir(data_dir, work_dir)
    test_pipeline.originate(task_func=task1,
                            output=[os.path.join(data_dir, "%s.1" % aa) for aa in "abcd"])
    test_pipeline.mkdir(filter=suffix(".1"),
                        output=".dir",
                        output_dir=work_dir)
    test_pipeline.transform(task_func=task2,
                            input=task1,
                            filter=suffix(".1"),
                            output=[".1", ".bak"],
                            extras=["extra.tst", 4, r"orig_dir=\1"],
                            output_dir=work_dir)
    test_pipeline.subdivide(task3, task2, suffix(".1"),
                            r"\1.*.2", [r"\1.a.2", r"\1.b.2"],
                            output_dir=data_dir)
    test_pipeline.transform(task4, task3, suffix(".2"), ".3",
                            output_dir=work_dir)
    test_pipeline.merge(task5, task4, os.path.join(data_dir, "summary.5"))
    test_pipeline.run(multiprocess=50, verbose=0)

    with open(os.path.join(data_dir, "summary.5")) as ii:
        active_text = ii.read()
    if active_text != expected_active_text:
        raise Exception("Error:\n\tExpected\n%s\nInstead\n%s\n" %
                        (expected_active_text, active_text))
Example 3: test_newstyle_collate
# Required import: from ruffus import Pipeline [as alias]
# Or: from ruffus.Pipeline import subdivide [as alias]
def test_newstyle_collate(self):
    """
    As above but create pipeline on the fly using object orientated syntax rather than decorators
    """
    #
    # Create pipeline on the fly, joining up tasks
    #
    test_pipeline = Pipeline("test")
    test_pipeline.originate(task_func=generate_initial_files,
                            output=original_files)\
        .mkdir(tempdir, tempdir + "/test")

    test_pipeline.subdivide(task_func=split_fasta_file,
                            input=generate_initial_files,
                            # match original files
                            filter=regex(r".*\/original_(\d+).fa"),
                            output=[tempdir + r"/files.split.\1.success",  # flag file for each original file
                                    tempdir + r"/files.split.\1.*.fa"],    # glob pattern
                            extras=[r"\1"])\
        .posttask(lambda: sys.stderr.write("\tSplit into %d files each\n" % JOBS_PER_TASK))

    test_pipeline.transform(task_func=align_sequences,
                            input=split_fasta_file,
                            filter=suffix(".fa"),
                            output=".aln")\
        .posttask(lambda: sys.stderr.write("\tSequences aligned\n"))

    test_pipeline.transform(task_func=percentage_identity,
                            input=align_sequences,    # find all results from align_sequences
                            filter=suffix(".aln"),    # replace suffix with:
                            output=[r".pcid",         # .pcid suffix for the result
                                    r".pcid_success"  # .pcid_success to indicate job completed
                                    ])\
        .posttask(lambda: sys.stderr.write("\t%Identity calculated\n"))

    test_pipeline.collate(task_func=combine_results,
                          input=percentage_identity,
                          filter=regex(r".*files.split\.(\d+)\.\d+.pcid"),
                          output=[tempdir + r"/\1.all.combine_results",
                                  tempdir + r"/\1.all.combine_results_success"])\
        .posttask(lambda: sys.stderr.write("\tResults recombined\n"))

    #
    # Cleanup, printout and run
    #
    self.cleanup_tmpdir()
    s = StringIO()
    test_pipeline.printout(s, [combine_results], verbose=5, wrap_width=10000)
    self.assertTrue(re.search('Job needs update:.*Missing files.*',
                              s.getvalue(), re.DOTALL) is not None)
    test_pipeline.run(verbose=0)
Example 4: test_newstyle_ruffus
# Required import: from ruffus import Pipeline [as alias]
# Or: from ruffus.Pipeline import subdivide [as alias]
def test_newstyle_ruffus(self):
    test_pipeline = Pipeline("test")
    test_pipeline.originate(task_func=make_start,
                            output=[tempdir + 'start'])
    test_pipeline.split(task_func=split_start,
                        input=make_start,
                        output=tempdir + '*.split')
    test_pipeline.subdivide(task_func=subdivide_start,
                            input=split_start,
                            filter=formatter(),
                            output=tempdir + '{basename[0]}_*.subdivided',
                            extras=[tempdir + '{basename[0]}'])

    expected_files_after_1_runs = ["start", "0.split", "0_0.subdivided"]
    expected_files_after_2_runs = ["1.split", "0_1.subdivided", "1_0.subdivided"]
    expected_files_after_3_runs = ["2.split", "0_2.subdivided",
                                   "1_1.subdivided", "2_0.subdivided"]
    expected_files_after_4_runs = ["3.split", "0_3.subdivided", "1_2.subdivided",
                                   "2_1.subdivided", "3_0.subdivided"]

    print(" 1 Run pipeline normally...")
    test_pipeline.run(multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
                                              expected_files_after_2_runs)

    print(" 2 Check that running again does nothing. (All up to date).")
    test_pipeline.run(multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs,
                                              expected_files_after_2_runs)
    time.sleep(2)

    print(" 3 Running again with forced tasks to generate more files...")
    test_pipeline.run(forcedtorun_tasks=["test::make_start"],
                      multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
                                              + expected_files_after_2_runs,
                                              expected_files_after_3_runs)

    print(" 4 Check that running again does nothing. (All up to date).")
    test_pipeline.run(multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
                                              + expected_files_after_2_runs,
                                              expected_files_after_3_runs)
    time.sleep(2)

    print(" 5 Running again with forced tasks to generate even more files...")
    test_pipeline.run(forcedtorun_tasks=make_start,
                      multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
                                              + expected_files_after_2_runs
                                              + expected_files_after_3_runs,
                                              expected_files_after_4_runs)

    print(" 6 Check that running again does nothing. (All up to date).")
    test_pipeline.run(multiprocess=10, verbose=TEST_VERBOSITY)
    self.check_file_exists_or_not_as_expected(expected_files_after_1_runs
                                              + expected_files_after_2_runs
                                              + expected_files_after_3_runs,
                                              expected_files_after_4_runs)