本文整理汇总了Python中pymodule.PassingData类的典型用法代码示例。如果您正苦于以下问题:Python PassingData类的具体用法?Python PassingData怎么用?Python PassingData使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PassingData类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_data_matrix
def get_data_matrix(self, db, phenotype_info, list_type_analysis_method_info, where_condition):
    """
    Assemble a 2D score matrix with one row per (list_type_id,
    analysis_method_id) pair and one column per phenotype method.

    Each cell holds -log10(pvalue) of the matching result row; cells with
    no result stay at -1, and a p-value of exactly 0 is stored as -2.

    Returns a PassingData with attributes:
        data_matrix -- the filled matrix
        min_value / max_value -- range of the finite -log10 scores,
            or None if no positive p-value was seen
    """
    sys.stderr.write("Getting data matrix ...")
    data_matrix = num.zeros([len(list_type_analysis_method_info.list_type_id_analysis_method_id2index),
                             len(phenotype_info.phenotype_method_id2index)], num.float)
    data_matrix[:] = -1  # -1 marks cells with no data
    # NOTE(review): where_condition appears to supply the table list and the
    # where-clause of this query -- confirm against the caller.
    rows = db.metadata.bind.execute("select r.analysis_method_id, r.phenotype_method_id, c.* from %s order by analysis_method_id"
                                    % (where_condition))
    min_value = None
    max_value = None
    for row in rows:
        tup = (row.list_type_id, row.analysis_method_id)
        row_index = list_type_analysis_method_info.list_type_id_analysis_method_id2index[tup]
        col_index = phenotype_info.phenotype_method_id2index[row.phenotype_method_id]
        if row.pvalue > 0:
            data_value = -math.log10(row.pvalue)
            # Track the observed range; use "is None" (not ==) for the sentinel.
            if min_value is None or data_value < min_value:
                min_value = data_value
            if max_value is None or data_value > max_value:
                max_value = data_value
        else:
            data_value = -2  # p-value of 0 cannot be log-transformed
        data_matrix[row_index, col_index] = data_value
    sys.stderr.write("Done.\n")
    return_data = PassingData()
    return_data.data_matrix = data_matrix
    return_data.min_value = min_value
    return_data.max_value = max_value
    return return_data
示例2: merge_call_on_one_row
def merge_call_on_one_row(cls, ecotypeid_duplicate_index_ls, data_matrix, no_of_cols, NA_set=None):
    """
    Collapse the duplicate rows given by ecotypeid_duplicate_index_ls into a
    single row of majority calls, while counting how often the duplicates
    disagree.

    Returns a PassingData with:
        one_row -- numpy array of majority call numbers, one per column
        no_of_non_NA_pairs -- number of columns with at least one non-NA call
        no_of_non_NA_inconsistent_pairs -- columns where duplicates disagree

    2008-07-11
        calculate the inconsistency ratio among duplicates
    2008-05-12
        -2 is also ruled out, add NA_set

    NA_set defaults to Set([0, -2]); it is now built per call instead of
    being a shared mutable default argument.
    """
    if NA_set is None:
        NA_set = Set([0, -2])  # 0 = untouched, -2 = NA
    one_row = numpy.zeros(no_of_cols)
    passingdata = PassingData()
    passingdata.no_of_non_NA_pairs = 0
    passingdata.no_of_non_NA_inconsistent_pairs = 0
    for i in range(no_of_cols):
        call_counter_ls = [0] * 11
        non_NA_call_number_set = Set()
        for index in ecotypeid_duplicate_index_ls:
            call_number = data_matrix[index][i]
            if call_number not in NA_set:  # don't need NA and non-touched bit
                call_counter_ls[call_number] += 1
                non_NA_call_number_set.add(call_number)
        if len(non_NA_call_number_set) > 0:
            passingdata.no_of_non_NA_pairs += 1
            if len(non_NA_call_number_set) > 1:
                # more than one distinct call among duplicates => inconsistent
                passingdata.no_of_non_NA_inconsistent_pairs += 1
        one_row[i] = dbSNP2data.get_majority_call_number(call_counter_ls)
    passingdata.one_row = one_row
    return passingdata
示例3: orderListTypeAnalysisMethodID
def orderListTypeAnalysisMethodID(self, list_type_id_ls, analysis_method_id_ls):
    """
    Produce the full ordering of (list_type_id, analysis_method_id) pairs:
    for every list type, one entry per analysis method, with a unique
    negative key inserted wherever list_type_id == -1 acts as a separator.

    2008-08-29
        deal with separator (list_type_id=-1) in list_type_id_ls

    Returns a PassingData with list_type_id_analysis_method_id_ls,
    list_type_id_analysis_method_id2index and list_type_analysis_method_label_ls.
    """
    # typo fix: "Orderinig" -> "Ordering"
    sys.stderr.write("Ordering list type id and analysis_method id ... ")
    list_type_id_analysis_method_id_ls = []
    list_type_id_analysis_method_id2index = {}
    list_type_analysis_method_label_ls = []
    no_of_separators = 0
    for list_type_id in list_type_id_ls:
        if list_type_id == -1:  # separator: keyed by a unique negative tuple
            no_of_separators += 1
            tup = (-no_of_separators, -1)
            list_type_id_analysis_method_id2index[tup] = len(list_type_id_analysis_method_id_ls)
            list_type_id_analysis_method_id_ls.append(tup)
            list_type_analysis_method_label_ls.append('')
            continue
        list_type_short_name = GeneListType.get(list_type_id).short_name
        for analysis_method_id in analysis_method_id_ls:
            analysis_method_short_name = AnalysisMethod.get(analysis_method_id).short_name
            tup = (list_type_id, analysis_method_id)
            list_type_id_analysis_method_id2index[tup] = len(list_type_id_analysis_method_id_ls)
            list_type_id_analysis_method_id_ls.append(tup)
            list_type_analysis_method_label_ls.append('%s_%s_%s' % (analysis_method_short_name, list_type_short_name, list_type_id))
    return_data = PassingData()
    return_data.list_type_id_analysis_method_id_ls = list_type_id_analysis_method_id_ls
    return_data.list_type_id_analysis_method_id2index = list_type_id_analysis_method_id2index
    return_data.list_type_analysis_method_label_ls = list_type_analysis_method_label_ls
    sys.stderr.write("Done.\n")
    return return_data
示例4: getDataStructureFromSNPsD
def getDataStructureFromSNPsD(self, snpsd):
    """
    Convert a snpsd object into (snps, sdps, nucs, numSamps):
    each SNP's calls are recoded to major/minor symbols via
    get_symbol2MAJ_MIN and joined into one string per SNP.
    Exits with status 2 if any SNP shows more than two alleles.

    05/07/08
    """
    sys.stderr.write("Reading data ...")
    no_of_rows = len(snpsd.positions)
    no_of_cols = len(snpsd.accessions)
    snps = []
    nucs = []
    for row_index in range(no_of_rows):
        one_snp_ls, symbol2counts = self.get_symbol2counts(snpsd.snps, fixed_index=row_index, no_of_rolls=no_of_cols, by_row=0)
        passingdata = self.get_symbol2MAJ_MIN(symbol2counts)
        if passingdata.symbol2MAJ_MIN == 3:  # sentinel: >2 alleles
            sys.stderr.write("Error: SNP %s (%s) has more than 2 alleles: %s.\n" % (row_index, snpsd.positions[row_index], repr(symbol2counts)))
            sys.exit(2)
        recoded = map(lambda symbol: passingdata.symbol2MAJ_MIN[symbol], one_snp_ls)
        snps.append(''.join(recoded))
        nucs.append((passingdata.major, passingdata.minor))
    result = PassingData()
    result.snps = array(snps)
    result.sdps = Set(snps)
    result.nucs = array(nucs)
    result.numSamps = no_of_cols
    sys.stderr.write("Done.\n")
    return result.snps, result.sdps, result.nucs, result.numSamps
示例5: getCallMethodInfo
def getCallMethodInfo(cls, affiliated_table_name, extra_condition=None, extra_tables=None):
    """
    Fetch distinct call methods joined against affiliated_table_name and
    return a PassingData with id_ls, id2index and label_ls
    ("<id> <short_name>" per method).

    2009-1-30
        similar to getPhenotypeInfo, getListTypeInfo, getAnalysisMethodInfo

    Removed dead locals (prev_biology_category_id / no_of_separators) that
    were never used.
    """
    table_str = "%s s, %s p" % (affiliated_table_name, model.Stock_250kDB.CallMethod.table.name)
    if extra_tables:
        table_str += ", %s" % extra_tables
    where_condition = "p.id=s.call_method_id"
    if extra_condition:
        where_condition += " and %s" % extra_condition
    rows = model.db.metadata.bind.execute(
        "select distinct p.id, p.short_name from %s where %s order by p.id"
        % (table_str, where_condition)
    )
    id_ls = []
    id2index = {}
    label_ls = []
    for row in rows:
        id2index[row.id] = len(id_ls)
        id_ls.append(row.id)
        label_ls.append("%s %s" % (row.id, row.short_name))
    list_info = PassingData()
    list_info.id2index = id2index
    list_info.id_ls = id_ls
    list_info.label_ls = label_ls
    return list_info
示例6: getPhenotypeInfo
def getPhenotypeInfo(self, db, where_condition):
    """
    Fetch phenotype methods (ordered by biology category) and return a
    PassingData with phenotype_method_id_ls, phenotype_method_id2index and
    phenotype_method_label_ls. A negative pseudo-id is inserted as a
    separator whenever the biology category changes.

    2008-08-29
        add -1 as a separator into phenotype_method_id_ls and others
    """
    sys.stderr.write("Getting phenotype method info ...")
    # NOTE(review): where_condition appears to contribute both a table (with
    # alias r) and the leading "where" clause of this query -- confirm with caller.
    rows = db.metadata.bind.execute("select distinct r.phenotype_method_id, p.biology_category_id from %s p, %s and p.id=r.phenotype_method_id order by p.biology_category_id, r.phenotype_method_id"
                                    % (PhenotypeMethod.table.name, where_condition))
    phenotype_method_id_ls = []
    phenotype_method_id2index = {}
    phenotype_method_label_ls = []
    prev_biology_category_id = None
    no_of_separators = 0
    for row in rows:
        if prev_biology_category_id is None:  # first row: just remember the category
            prev_biology_category_id = row.biology_category_id
        elif row.biology_category_id != prev_biology_category_id:
            prev_biology_category_id = row.biology_category_id
            # add a blank phenotype id as separator
            no_of_separators += 1
            phenotype_method_id2index[-no_of_separators] = len(phenotype_method_id_ls)
            phenotype_method_id_ls.append(-no_of_separators)
            phenotype_method_label_ls.append('')
        phenotype_method_id2index[row.phenotype_method_id] = len(phenotype_method_id_ls)
        phenotype_method_id_ls.append(row.phenotype_method_id)
        pm = PhenotypeMethod.get(row.phenotype_method_id)
        phenotype_method_label_ls.append('%s_%s' % (pm.id, pm.short_name))
    phenotype_info = PassingData()
    phenotype_info.phenotype_method_id2index = phenotype_method_id2index
    phenotype_info.phenotype_method_id_ls = phenotype_method_id_ls
    phenotype_info.phenotype_method_label_ls = phenotype_method_label_ls
    sys.stderr.write("Done.\n")
    return phenotype_info
示例7: returnGeneSegments
def returnGeneSegments(self, db, elem=None, gene_commentary=None, commentary_type=None):
    """
    Build GeneSegment objects for every (start, stop, gi) location parsed
    from elem, and return a PassingData carrying the segments plus the
    overall min start / max stop across all locations.

    2012.5.15
        add argument commentary_type to stop replicating gene_commentary.gene_commentary_type
    2008-07-28
    """
    start_ls, stop_ls, gi_ls = self.return_location_list(elem)
    # Resolve the commentary type once: explicit argument wins over the
    # type already attached to gene_commentary.
    if commentary_type:
        gene_commentary_type = db.getGeneCommentaryType(commentary_type=commentary_type)
    else:
        gene_commentary_type = gene_commentary.gene_commentary_type
    gene_segments = []
    min_start = start_ls[0]
    max_stop = stop_ls[0]
    for start, stop, gi in zip(start_ls, stop_ls, gi_ls):
        # start/stop may come in either order; track the overall extremes.
        min_start = min(min_start, start, stop)
        max_stop = max(max_stop, start, stop)
        segment = GeneSegment(start=start, stop=stop, gi=gi, gene_commentary_type=gene_commentary_type)
        segment.gene_commentary = gene_commentary
        gene_segments.append(segment)
    passingdata = PassingData()
    passingdata.gene_segments = gene_segments
    passingdata.start = min_start
    passingdata.stop = max_stop
    return passingdata
示例8: computing_node_handler
def computing_node_handler(self, communicator, data, param_obj):
    """
    Worker-node entry point: unpickle a batch of
    (results_method_id, list_type_id) tasks, run run_wilcox_test on each
    with a shared PassingData of parameters, and return the non-None results.

    2009-9-16
        parameter test_type is renamed to test_type_id
    2008-08-20
        wrap all parameters into pd and pass it to run_wilcox_test
    2008-07-17
    """
    node_rank = communicator.rank
    sys.stderr.write("Node no.%s working...\n" % node_rank)
    task_ls = cPickle.loads(data)
    # One PassingData is reused across tasks; only results_id and
    # list_type_id are rewritten per iteration.
    pd = PassingData(snps_context_wrapper=param_obj.snps_context_wrapper,
                     results_directory=param_obj.results_directory,
                     min_MAF=param_obj.min_MAF,
                     get_closest=self.get_closest,
                     min_distance=self.min_distance,
                     min_sample_size=self.min_sample_size,
                     test_type_id=self.test_type_id,
                     results_type=self.results_type,
                     no_of_permutations=self.no_of_permutations,
                     no_of_min_breaks=self.no_of_min_breaks)
    result_ls = []
    for results_method_id, list_type_id in task_ls:
        pd.results_id = results_method_id
        pd.list_type_id = list_type_id
        outcome = self.run_wilcox_test(pd)
        if outcome is not None:
            result_ls.append(outcome)
    sys.stderr.write("Node no.%s done with %s results.\n" % (node_rank, len(result_ls)))
    return result_ls
示例9: get_symbol2MAJ_MIN
def get_symbol2MAJ_MIN(self, symbol2counts):
    """
    Map each observed symbol to MAJ or MIN (by count) and the NA character
    to '?'. Returns a PassingData with symbol2MAJ_MIN, major, minor.
    When more than two symbols occur, symbol2MAJ_MIN is the sentinel 3 and
    major/minor are None -- the caller must check for that.
    """
    symbol2MAJ_MIN = {self.input_NA_char: '?'}  # 'NA' is always '?'
    symbols = list(symbol2counts.keys())
    n_symbols = len(symbols)
    if n_symbols == 0:
        major = ''
        minor = ''
    elif n_symbols == 1:
        major = symbols[0]
        minor = ''
        symbol2MAJ_MIN[major] = MAJ
    elif n_symbols == 2:
        major, minor = symbols
        if symbol2counts[major] < symbol2counts[minor]:
            major, minor = minor, major  # swap so major has the higher count
        symbol2MAJ_MIN[major] = MAJ
        symbol2MAJ_MIN[minor] = MIN
    else:
        # more than two alleles: signal with the integer sentinel 3
        major, minor = None, None
        symbol2MAJ_MIN = 3
    passingdata = PassingData()
    passingdata.symbol2MAJ_MIN = symbol2MAJ_MIN
    passingdata.major = major
    passingdata.minor = minor
    return passingdata
示例10: getStrainIDInfo
def getStrainIDInfo(self, db, strain_id_info_query, strain_id_set=None):
    """
    Run strain_id_info_query and return a PassingData with strain_id_ls,
    strain_id2index and strain_label_ls. Rows not in strain_id_set (when
    given) are skipped; a negative pseudo-id separator is inserted whenever
    the country abbreviation changes. Site names are truncated to 10 chars
    in the label.

    2008-08-29
    """
    sys.stderr.write("Getting strain id info ...")
    rows = db.metadata.bind.execute(strain_id_info_query)
    strain_id_ls = []
    strain_id2index = {}
    strain_label_ls = []
    prev_country_abbr = None
    no_of_separators = 0
    for row in rows:
        if strain_id_set and row.strainid not in strain_id_set:  # skip
            continue
        if prev_country_abbr is None:  # first row: just remember the country
            prev_country_abbr = row.abbr
        elif row.abbr != prev_country_abbr:
            prev_country_abbr = row.abbr
            no_of_separators += 1
            strain_id2index[-no_of_separators] = len(strain_id_ls)
            strain_id_ls.append(-no_of_separators)
            strain_label_ls.append('')
        strain_id2index[row.strainid] = len(strain_id_ls)
        strain_id_ls.append(row.strainid)
        # slicing handles both the long and short cases; no branch needed
        sitename = row.sitename[:10]
        strain_label_ls.append('%s_%s_%s_%s' % (row.abbr, sitename, row.nativename, row.strainid))
    strain_id_info = PassingData()
    strain_id_info.strain_id_ls = strain_id_ls
    strain_id_info.strain_id2index = strain_id2index
    strain_id_info.strain_label_ls = strain_label_ls
    sys.stderr.write("Done.\n")
    return strain_id_info
示例11: preReduce
def preReduce(self, workflow=None, passingData=None, transferOutput=True, **keywords):
    """
    Return an empty reduce container: a PassingData with no_of_jobs=0 and
    an empty jobDataLs. No jobs are added here.

    2013.2.10
    """
    result = PassingData(no_of_jobs=0)
    result.jobDataLs = []
    return result
示例12: getListTypeInfo
def getListTypeInfo(cls, affiliated_table_name=None, extra_condition=None, extra_tables=None):
    """
    Fetch gene list types sorted by biology category and return a
    PassingData with list_type_id_ls, list_type_id2index and
    list_type_label_ls. A negative pseudo-id labelled "====\\n====" is
    inserted whenever the biology category changes.

    2009-3-9
        handle the case in which there is no the where_condition at all.
    2008-10-30
        affiliated_table_name becomes optional
    2008-10-19
        add option extra_tables
    2008-10-16
        sort gene list type by biology_category_id and return other info as well
        add -1 as a separator into list_type_id_ls
    """
    clauses = []
    if affiliated_table_name:
        table_str = "%s s, %s p" % (affiliated_table_name, model.Stock_250kDB.GeneListType.table.name)
        clauses.append("p.id=s.list_type_id")
    else:
        table_str = "%s p" % (model.Stock_250kDB.GeneListType.table.name)
    if extra_tables:
        table_str += ", %s" % extra_tables
    if extra_condition:
        clauses.append(extra_condition)
    # 2009-3-9: only emit a where-clause when there is something to filter on
    if clauses:
        where_sql = "where " + " and ".join(clauses)
    else:
        where_sql = ""
    rows = model.db.metadata.bind.execute(
        "select distinct p.id, p.biology_category_id, p.short_name from %s \
        %s order by p.biology_category_id, p.id"
        % (table_str, where_sql)
    )
    list_type_id_ls = []
    list_type_id2index = {}
    list_type_label_ls = []
    prev_category_id = -1  # -1 means "no row seen yet"
    separator_count = 0
    for row in rows:
        if prev_category_id != -1 and row.biology_category_id != prev_category_id:
            # category boundary: insert a separator with a unique negative id
            separator_count += 1
            list_type_id2index[-separator_count] = len(list_type_id_ls)
            list_type_id_ls.append(-separator_count)
            list_type_label_ls.append("====\n====")
        prev_category_id = row.biology_category_id
        list_type_id2index[row.id] = len(list_type_id_ls)
        list_type_id_ls.append(row.id)
        list_type_label_ls.append("%s %s" % (row.id, row.short_name))
    list_info = PassingData()
    list_info.list_type_id2index = list_type_id2index
    list_info.list_type_id_ls = list_type_id_ls
    list_info.list_type_label_ls = list_type_label_ls
    return list_info
示例13: reduceAfterEachAlignment
def reduceAfterEachAlignment(self, workflow=None, passingData=None, mapEachChromosomeDataLs=None,
                             reduceAfterEachChromosomeDataLs=None,
                             transferOutput=True, **keywords):
    """
    Bundle the per-chromosome map/reduce outputs into a PassingData.
    No jobs are added (no_of_jobs stays 0).
    """
    result = PassingData(no_of_jobs=0)
    result.jobDataLs = []
    result.mapEachChromosomeDataLs = mapEachChromosomeDataLs
    result.reduceAfterEachChromosomeDataLs = reduceAfterEachChromosomeDataLs
    return result
示例14: reduceEachVCF
def reduceEachVCF(self, workflow=None, chromosome=None, passingData=None, mapEachIntervalDataLs=None,
                  transferOutput=True, **keywords):
    """
    2013.05.01
    #. concatenate all the sub-VCFs into one

    Reduce step per VCF: schedules three concatenation sub-workflows (beagle
    output, refined genotypes, merged replicate columns) over the per-interval
    jobs in mapEachIntervalDataLs, scaling walltime/memory from the input
    volume, then feeds each interval's trioCaller concordance jobs into the
    stat-merge jobs. Returns a PassingData carrying mapEachIntervalDataLs.
    """
    returnData = PassingData(no_of_jobs=0)
    returnData.jobDataLs = []
    returnData.mapEachIntervalDataLs = mapEachIntervalDataLs
    # collect the per-interval jobs that will be concatenated below
    refineGenotypeJobLs = [pdata.refineGenotypeJob for pdata in mapEachIntervalDataLs]
    mergeVCFReplicateColumnsJobLs = [pdata.mergeVCFReplicateColumnsJob for pdata in mapEachIntervalDataLs]
    # scale resources by individuals x loci of the input VCF
    realInputVolume = passingData.jobData.file.noOfIndividuals * passingData.jobData.file.noOfLoci
    baseInputVolume = 200 * 2000000
    # base is 4X coverage in 20Mb region => 120 minutes
    walltime = self.scaleJobWalltimeOrMemoryBasedOnInput(realInputVolume=realInputVolume,
                                                         baseInputVolume=baseInputVolume, baseJobPropertyValue=60,
                                                         minJobPropertyValue=60, maxJobPropertyValue=500).value
    # base is 4X, => 5000M
    job_max_memory = self.scaleJobWalltimeOrMemoryBasedOnInput(realInputVolume=realInputVolume,
                                                               baseInputVolume=baseInputVolume, baseJobPropertyValue=2000,
                                                               minJobPropertyValue=2000, maxJobPropertyValue=8000).value
    # concatenate the overlapping-interval beagle outputs into one VCF
    self.concatenateOverlapIntervalsIntoOneVCFSubWorkflow(passingData=passingData,
                                                          intervalJobLs=[pdata.beagleJob for pdata in mapEachIntervalDataLs],
                                                          outputDirJob=self.beagleReduceDirJob,
                                                          transferOutput=True, job_max_memory=job_max_memory, walltime=walltime,
                                                          **keywords)
    # same for the refined-genotype interval jobs
    self.concatenateOverlapIntervalsIntoOneVCFSubWorkflow(passingData=passingData,
                                                          intervalJobLs=refineGenotypeJobLs, outputDirJob=self.replicateVCFDirJob,
                                                          transferOutput=True, job_max_memory=job_max_memory, walltime=walltime,
                                                          **keywords)
    # and for the merged-replicate-column interval jobs
    self.concatenateOverlapIntervalsIntoOneVCFSubWorkflow(passingData=passingData,
                                                          intervalJobLs=mergeVCFReplicateColumnsJobLs, outputDirJob=self.reduceOutputDirJob,
                                                          transferOutput=True, job_max_memory=job_max_memory, walltime=walltime,
                                                          **keywords)
    for pdata in mapEachIntervalDataLs:
        #add this output to the union job
        """
        self.addInputToStatMergeJob(statMergeJob=self.reduceBeaglePhaseReplicateConcordanceJob_AllSites, \
        parentJobLs=[pdata.beaglePhasedReplicateConcordanceJob])
        self.addInputToStatMergeJob(statMergeJob=self.reduceBeaglePhaseReplicateConcordanceJob_HomoOnly, \
        parentJobLs=[pdata.beaglePhasedReplicateConcordanceJob])
        """
        self.addInputToStatMergeJob(statMergeJob=self.reduceTrioCallerReplicateConcordanceJob_AllSites,
                                    parentJobLs=[pdata.trioCallerReplicateConcordanceJob])
        self.addInputToStatMergeJob(statMergeJob=self.reduceTrioCallerReplicateConcordanceJob_HomoOnly,
                                    parentJobLs=[pdata.trioCallerReplicateConcordanceJob])
    return returnData
示例15: mapEachInterval
def mapEachInterval(self, workflow=None, alignmentData=None, intervalData=None,
                    VCFJobData=None, passingData=None, transferOutput=True, **keywords):
    """
    2012.9.17

    Per-interval map step: adds a GATK VariantAnnotator job over the
    interval's VCF and alignment, then an ExtractInfoFromVCF job on the
    annotated output. Returns a PassingData whose jobDataLs gets one entry
    with both jobs; also exposed as returnData.variantAnnotatorJob /
    returnData.extractInfoJob. Increments self.no_of_jobs by 2.
    """
    if workflow is None:
        workflow = self
    returnData = PassingData(no_of_jobs=0)
    returnData.jobDataLs = []
    topOutputDirJob = passingData.topOutputDirJob
    alignment = alignmentData.alignment
    parentJobLs = alignmentData.jobLs
    bamF = alignmentData.bamF
    baiF = alignmentData.baiF
    bamFnamePrefix = passingData.bamFnamePrefix
    # prefer the interval file (when available) for the bcftools-style
    # interval argument; the string form is used for mpileup either way
    if intervalData.file:
        mpileupInterval = intervalData.interval
        bcftoolsInterval = intervalData.file
    else:
        mpileupInterval = intervalData.interval
        bcftoolsInterval = intervalData.interval
    intervalFileBasenameSignature = intervalData.intervalFileBasenameSignature
    overlapInterval = intervalData.overlapInterval
    overlapFileBasenameSignature = intervalData.overlapIntervalFnameSignature
    VCFFile = VCFJobData.file
    annotationName = passingData.annotationName
    # annotated VCF output for this interval
    outputFile = File(os.path.join(topOutputDirJob.output, '%s_%s.%s.vcf' % (bamFnamePrefix, overlapFileBasenameSignature, annotationName)))
    variantAnnotatorJob = self.addGATKVariantAnnotatorJob(workflow, executable=workflow.annotateVariantJava,
                                                          GenomeAnalysisTKJar=workflow.GenomeAnalysisTKJar, bamFile=bamF,
                                                          VCFFile=VCFFile, annotationName=annotationName, interval=bcftoolsInterval, outputFile=outputFile,
                                                          refFastaFList=passingData.refFastaFList, parentJobLs=[topOutputDirJob] + parentJobLs,
                                                          extraDependentInputLs=[baiF, VCFFile.tbi_F],
                                                          transferOutput=False,
                                                          extraArguments=None, job_max_memory=4000)
    # tab-separated extraction of the annotation values from the annotated VCF
    outputFile = File(os.path.join(topOutputDirJob.output, '%s_%s.%s.tsv' % (bamFnamePrefix, overlapFileBasenameSignature, annotationName)))
    extractInfoJob = self.addGenericJob(workflow=workflow, executable=workflow.ExtractInfoFromVCF, inputFile=variantAnnotatorJob.output,
                                        inputArgumentOption="-i",
                                        outputFile=outputFile, outputArgumentOption="-o",
                                        parentJobLs=[variantAnnotatorJob], extraDependentInputLs=None, extraOutputLs=None, transferOutput=False,
                                        extraArguments="-k %s" % (annotationName), extraArgumentList=None, job_max_memory=2000, sshDBTunnel=None,
                                        key2ObjectForJob=None)
    returnData.jobDataLs.append(PassingData(jobLs=[variantAnnotatorJob, extractInfoJob], file=variantAnnotatorJob.output,
                                            fileLs=[variantAnnotatorJob.output, extractInfoJob.output]))
    returnData.variantAnnotatorJob = variantAnnotatorJob
    returnData.extractInfoJob = extractInfoJob
    #add the sub-alignment to the alignment merge job
    self.no_of_jobs += 2
    return returnData