This page collects typical usage examples of the Python method galaxy.util.bunch.Bunch.output. If you are unsure what Bunch.output does, how to call it, or want to see it in working code, the curated samples below may help. You can also read more about its containing class, galaxy.util.bunch.Bunch.
Two code examples of Bunch.output are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
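For context, galaxy.util.bunch.Bunch is a simple container whose constructor turns keyword arguments into attributes, so Bunch.output is simply whatever value was passed as output= when the object was built. The following minimal sketch is illustrative only: the attribute values are made up, and it assumes the galaxy package is importable.

from galaxy.util.bunch import Bunch

# Any keyword becomes an attribute; the values below are hypothetical.
options = Bunch( output='lastz_out.tabular', format='tabular', ref_name='chrM' )
print( options.output )   # -> lastz_out.tabular
print( options.format )   # -> tabular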
Example 1: __main__
# Required import: from galaxy.util.bunch import Bunch  [as alias]
# Or: from galaxy.util.bunch.Bunch import output  [as alias]
# Standard-library imports used by this excerpt:
import optparse
import subprocess
import sys
import tempfile
def __main__():
    # Parse the command line
    parser = optparse.OptionParser()
    parser.add_option( '', '--threads', dest='threads', help='The number of threads to use' )
    parser.add_option( '', '--ref_name', dest='ref_name', help='The reference name to change all output matches to' )
    parser.add_option( '', '--ref_source', dest='ref_source', help='Whether the reference is self, cached or from the history' )
    parser.add_option( '', '--ref_sequences', dest='ref_sequences', help='Number of sequences in the reference dataset' )
    parser.add_option( '', '--mirror', dest='mirror', help='Do or do not report mirror image of all gap-free alignments' )
    parser.add_option( '', '--source_select', dest='source_select', help='Whether to use pre-set or cached reference file' )
    parser.add_option( '', '--input1', dest='input1', help='The name of the reference file if using history or reference base name if using cached' )
    parser.add_option( '', '--input2', dest='input2', help='The reads file to align' )
    parser.add_option( '', '--strand', dest='strand', help='Which strand of the read to search, if specifying all parameters' )
    parser.add_option( '', '--match_reward', dest='match_reward', help='Score values for a match (reward)' )
    parser.add_option( '', '--match_penalty', dest='match_penalty', help='Score values for a mismatch (penalty), same as reward when not specified (but reward is)' )
    parser.add_option( '', '--gapped', dest='gapped', help='Perform gapped extension of HSPs (or seeds if gap-free extension is not performed) after first reducing them to anchor points' )
    parser.add_option( '', '--gap_open', dest='gap_open', help='Score penalties for opening a gap' )
    parser.add_option( '', '--gap_extend', dest='gap_extend', help='Score penalties for extending a gap' )
    parser.add_option( '', '--ambiguous', dest='ambiguous', help='Treat as ambiguous nucleotides' )
    parser.add_option( '', '--step', dest='step', help='Offset between the starting positions of successive target words considered for potential seeds' )
    parser.add_option( '', '--masking', dest='masking', help='Dynamically mask the target sequence by excluding any positions that appear in too many alignments from further consideration for seeds' )
    parser.add_option( '', '--seed', dest='seed', help='Offset between the starting positions of successive target words considered for potential seeds' )
    parser.add_option( '', '--match_length', dest='match_length', help='Seeds require a word of this length (in bp) with matches in all positions' )
    parser.add_option( '', '--transition', dest='transition', help='Transition settings, affects the number of allowed transition substitutions in each seed' )
    parser.add_option( '', '--xdrop', dest='xdrop', help='Find HSPs using the x-drop extension method with the given termination threshold instead of using the exact match method' )
    parser.add_option( '', '--hspthresh', dest='hspthresh', help='Score threshold for the x-drop extension method' )
    parser.add_option( '', '--entropy', dest='entropy', help='Whether to adjust for entropy when qualifying HSPs in the x-drop extension method' )
    parser.add_option( '', '--chain', dest='chain', help='Perform chaining of HSPs with no penalties' )
    parser.add_option( '', '--ydrop', dest='ydrop', help='Set the threshold for terminating gapped extension' )
    parser.add_option( '', '--ytrim', dest='ytrim', help='Trim back to peak score if y-drop extension encounters end of sequence' )
    parser.add_option( '', '--gappedthresh', dest='gappedthresh', help='Threshold for gapped extension. Alignments scoring lower are discarded.' )
    parser.add_option( '', '--filter', dest='filter', help='Filter alignments.' )
    parser.add_option( '', '--identity_min', dest='identity_min', help='Minimum for filtering alignments by their percent identity.' )
    parser.add_option( '', '--identity_max', dest='identity_max', help='Maximum for filtering alignments by their percent identity.' )
    parser.add_option( '', '--coverage_min', dest='coverage_min', help='Minimum for filtering alignments by how much of the input sequence they cover.' )
    parser.add_option( '', '--coverage_max', dest='coverage_max', help='Maximum for filtering alignments by how much of the input sequence they cover.' )
    parser.add_option( '', '--nmatch_min', dest='nmatch_min', help='Minimum for filtering alignments by how many bases they match.' )
    parser.add_option( '', '--nmismatch_max', dest='nmismatch_max', help='Maximum for filtering alignments by the number of mismatches.' )
    parser.add_option( '', '--trivial', dest='trivial', help='Do or do not output a trivial self-alignment block if the target and query sequences are identical.' )
    parser.add_option( '', '--inner', dest='inner', help='Perform additional alignment between the gapped alignment blocks using (presumably) more sensitive alignment parameters.' )
    parser.add_option( '', '--shortcuts_for_yasra', dest='shortcuts_for_yasra', help='Shortcut options to support the Yasra mapping assembler' )
    parser.add_option( '', '--out_format', dest='format', help='The format of the output file (sam, diffs, or tabular (general))' )
    parser.add_option( '', '--output', dest='output', help='The output file' )
    parser.add_option( '', '--lastzSeqsFileDir', dest='lastzSeqsFileDir', help='Directory of local lastz_seqs.loc file' )
    ( options, args ) = parser.parse_args()
    # Output version # of tool
    try:
        tmp = tempfile.NamedTemporaryFile().name
        tmp_stdout = open( tmp, 'wb' )
        proc = subprocess.Popen( args='lastz -v', shell=True, stdout=tmp_stdout )
        tmp_stdout.close()
        returncode = proc.wait()
        stdout = None
        for line in open( tmp_stdout.name, 'rb' ):
            if line.lower().find( 'version' ) >= 0:
                stdout = line.strip()
                break
        if stdout:
            sys.stdout.write( '%s\n' % stdout )
        else:
            raise Exception
    except:
        sys.stdout.write( 'Could not determine Lastz version\n' )
    if options.ref_name:
        ref_name = '[nickname=%s]' % options.ref_name
    else:
        ref_name = ''
    set_options = ''
    # Commonly-used preset options
    if options.source_select == 'pre_set':
        # Handle ref_source
        if options.ref_source == 'self':
            # --mirror is available only if the ref_source selection is --self
            if options.mirror == 'yes':
                set_options += '--nomirror '
    else:
        # Full set of user-specified options
        # Handle ref_source
        if options.ref_source == 'self':
            # --mirror is available only if the ref_source selection is --self
            if options.mirror == 'yes':
                set_options += '--nomirror '
        else:
            # Using --self automatically enables this option
            if options.trivial == 'no':
                set_options += '--notrivial '
        # Handle --match
        if options.match_reward not in [ "", "0" ]:
            if options.match_penalty in [ "", "0" ]:
                match_penalty = options.match_reward
            else:
                match_penalty = options.match_penalty
            set_options += '--match=%s,%s ' % ( options.match_reward, match_penalty )
        # Handle --gapped
        if options.gapped == 'yes':
            set_options += '--gapped '
            if options.gap_open not in [ "" ]:
                if options.gap_extend in [ "" ]:
                    set_options += '--gap=%s ' % options.gap_open
                else:
#......... the remainder of this code is omitted .........
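The tail of this example is omitted on this page. Purely as an illustration, here is a hypothetical sketch of how such a wrapper might finish: building the lastz command from the accumulated set_options and writing the result to options.output. The command layout below is an assumption, not the original code, and it presumes the variables from the excerpt (options, ref_name, set_options) are still in scope.

# Hypothetical continuation (assumption, not the original code): run lastz with
# the accumulated options and redirect its output to options.output.
lastz_command = 'lastz %s%s %s %s --format=%s > %s' % \
    ( options.input1, ref_name, options.input2, set_options.strip(),
      options.format, options.output )
proc = subprocess.Popen( args=lastz_command, shell=True )
if proc.wait() != 0:
    sys.stderr.write( 'lastz exited with a non-zero return code\n' )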
Example 2: __main__
# Required import: from galaxy.util.bunch import Bunch  [as alias]
# Or: from galaxy.util.bunch.Bunch import output  [as alias]
# Standard-library imports used by this excerpt:
import optparse
import subprocess
import sys
import tempfile
# LastzJobQueue, CombineDataQueue, stop_queues, stop_err, WORKERS and SLOTS
# are defined elsewhere in the original tool script and are not shown here.
def __main__():
    # Parse the command line
    parser = optparse.OptionParser()
    parser.add_option( '', '--ref_name', dest='ref_name', help='The reference name to change all output matches to' )
    parser.add_option( '', '--ref_source', dest='ref_source', help='Whether the reference is cached or from the history' )
    parser.add_option( '', '--ref_sequences', dest='ref_sequences', help='Number of sequences in the reference dataset' )
    parser.add_option( '', '--source_select', dest='source_select', help='Whether to use pre-set or cached reference file' )
    parser.add_option( '', '--input1', dest='input1', help='The name of the reference file if using history or reference base name if using cached' )
    parser.add_option( '', '--input2', dest='input2', help='The reads file to align' )
    parser.add_option( '', '--pre_set_options', dest='pre_set_options', help='Which of the pre-set options to use, if using pre-sets' )
    parser.add_option( '', '--strand', dest='strand', help='Which strand of the read to search, if specifying all parameters' )
    parser.add_option( '', '--seed', dest='seed', help='Seeding settings, if specifying all parameters' )
    parser.add_option( '', '--transition', dest='transition', help='Number of transitions to allow in each seed hit, if specifying all parameters' )
    parser.add_option( '', '--gfextend', dest='gfextend', help='Whether to perform gap-free extension of seed hits to HSPs (high scoring segment pairs), if specifying all parameters' )
    parser.add_option( '', '--chain', dest='chain', help='Whether to perform chaining of HSPs, if specifying all parameters' )
    parser.add_option( '', '--O', dest='O', help='Gap opening penalty, if specifying all parameters' )
    parser.add_option( '', '--E', dest='E', help='Gap extension penalty, if specifying all parameters' )
    parser.add_option( '', '--X', dest='X', help='X-drop threshold, if specifying all parameters' )
    parser.add_option( '', '--Y', dest='Y', help='Y-drop threshold, if specifying all parameters' )
    parser.add_option( '', '--K', dest='K', help='Threshold for HSPs, if specifying all parameters' )
    parser.add_option( '', '--L', dest='L', help='Threshold for gapped alignments, if specifying all parameters' )
    parser.add_option( '', '--entropy', dest='entropy', help='Whether to involve entropy when filtering HSPs, if specifying all parameters' )
    parser.add_option( '', '--identity_min', dest='identity_min', help="Minimum identity (don't report matches under this identity)" )
    parser.add_option( '', '--identity_max', dest='identity_max', help="Maximum identity (don't report matches above this identity)" )
    parser.add_option( '', '--coverage', dest='coverage', help="The minimum coverage value (don't report matches covering less than this)" )
    parser.add_option( '', '--unmask', dest='unmask', help='Whether to convert lowercase bases to uppercase' )
    parser.add_option( '', '--out_format', dest='format', help='The format of the output file (sam, diffs, or tabular (general))' )
    parser.add_option( '', '--output', dest='output', help='The output file' )
    parser.add_option( '', '--lastzSeqsFileDir', dest='lastzSeqsFileDir', help='Directory of local lastz_seqs.loc file' )
    ( options, args ) = parser.parse_args()
    # Output version # of tool
    try:
        tmp = tempfile.NamedTemporaryFile().name
        tmp_stdout = open( tmp, 'wb' )
        proc = subprocess.Popen( args='lastz -v', shell=True, stdout=tmp_stdout )
        tmp_stdout.close()
        returncode = proc.wait()
        stdout = None
        for line in open( tmp_stdout.name, 'rb' ):
            if line.lower().find( 'version' ) >= 0:
                stdout = line.strip()
                break
        if stdout:
            sys.stdout.write( '%s\n' % stdout )
        else:
            raise Exception
    except:
        sys.stdout.write( 'Could not determine Lastz version\n' )
    if options.unmask == 'yes':
        unmask = '[unmask]'
    else:
        unmask = ''
    if options.ref_name:
        ref_name = '[nickname=%s]' % options.ref_name
    else:
        ref_name = ''
    # Prepare for commonly-used preset options
    if options.source_select == 'pre_set':
        set_options = '--%s' % options.pre_set_options
    # Prepare for user-specified options
    else:
        set_options = '--%s --%s --gapped --strand=%s --seed=%s --%s O=%s E=%s X=%s Y=%s K=%s L=%s --%s' % \
            ( options.gfextend, options.chain, options.strand, options.seed, options.transition,
              options.O, options.E, options.X, options.Y, options.K, options.L, options.entropy )
    # Specify input2 and add [fullnames] modifier if output format is diffs
    if options.format == 'diffs':
        input2 = '%s[fullnames]' % options.input2
    else:
        input2 = options.input2
    if options.format == 'tabular':
        # Change output format to general if it's tabular and add field names for tabular output
        format = 'general-'
        tabular_fields = ':score,name1,strand1,size1,start1,zstart1,end1,length1,text1,name2,strand2,size2,start2,zstart2,end2,start2+,zstart2+,end2+,length2,text2,diff,cigar,identity,coverage,gaprate,diagonal,shingle'
    elif options.format == 'sam':
        # We currently ALWAYS suppress SAM headers.
        format = 'sam-'
        tabular_fields = ''
    else:
        format = options.format
        tabular_fields = ''
    # Set up our queues
    lastz_job_queue = LastzJobQueue( WORKERS, slots=SLOTS )
    combine_data_queue = CombineDataQueue( options.output )
    if options.ref_source == 'history':
        # The reference is a fasta dataset from the history, so split the job across
        # the number of sequences in the dataset ( this could be a HUGE number ).
        try:
            # Ensure there is at least 1 sequence in the dataset ( this may not be necessary ).
            error_msg = "The reference dataset is missing metadata, click the pencil icon in the history item and 'auto-detect' the metadata attributes."
            ref_sequences = int( options.ref_sequences )
            if ref_sequences < 1:
                stop_queues( lastz_job_queue, combine_data_queue )
                stop_err( error_msg )
        except:
            stop_queues( lastz_job_queue, combine_data_queue )
            stop_err( error_msg )
#......... the remainder of this code is omitted .........
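The omitted remainder is presumably where Bunch.output enters this example: each per-sequence lastz job is handed to the job queue as a small object whose output attribute names a temporary result file for CombineDataQueue to merge into options.output. The sketch below is assumption-laden: the Bunch field names and the put()-style queue API are guesses, not the documented interface of LastzJobQueue, and the per-sequence splitting of the reference fasta is skipped entirely. It relies on the Bunch import shown at the top of this example.

# Hypothetical sketch (field names and queue API are assumptions): wrap one lastz
# command in a Bunch and hand it to the job queue; the output attribute tells the
# combine queue which temporary file to append to options.output.
tmp_out = tempfile.NamedTemporaryFile( delete=False )
tmp_out.close()
command = 'lastz %s%s%s %s %s --format=%s%s > %s' % \
    ( options.input1, ref_name, unmask, input2, set_options, format, tabular_fields, tmp_out.name )
job = Bunch( command=command, output=tmp_out.name )
lastz_job_queue.put( job, block=True )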