This article collects typical usage examples of the Python unique.filepath function. If you have been wondering how the Python filepath function is used in practice, or are looking for concrete examples of calling it, the curated code samples here should help.
A total of 15 code examples of the filepath function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
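All of the examples share one basic pattern: a relative file name is passed to unique.filepath, which resolves it to an absolute path inside the AltAnalyze installation, and the resolved path is then opened for reading. A minimal sketch of that pattern is shown below (the AltDatabase file name is only illustrative):

import unique  # AltAnalyze's path-resolution module

# Resolve a name relative to the AltAnalyze installation to an absolute path
# (the species directory 'Hs' here is a hypothetical example).
fn = unique.filepath('AltDatabase/ensembl/Hs/Hs_Ensembl_exon.txt')

# Python 2 style iteration over a tab-delimited file, as used throughout the examples below.
for line in open(fn, 'rU').xreadlines():
    fields = line.rstrip('\n').split('\t')
    # ... process each row here ...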
Example 1: Sashimiplottting
def Sashimiplottting(bamdir,countsin,PSIFilename,eventsToVisualizeFilename,events=None):
    PSIFilename = unique.filepath(PSIFilename)
    header=True
    junction_max=[]
    countsin = unique.filepath(countsin)
    count_sum_array=[]
    count=0
    for line in open(countsin,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if header:
            samples = []
            for s in t[1:]:
                if '.bed' not in s: s+='.bed'
                samples.append(s)
            header=False
            count_sum_array=[0]*len(samples)
        else:
            values = map(float,t[1:])
            count_sum_array = [sum(value) for value in zip(*[count_sum_array,values])]
            count+=1
            if count >30000 and 'salomonis' in bamdir: break
    index=0
    for sample in samples:
        count_sum_array_db[sample] = count_sum_array[index]
        index+=1
    if events==None:
        #print 'Preparing Sashimi-Input:',eventsToVisualizeFilename
        eventsToVisualizeFilename = unique.filepath(eventsToVisualizeFilename)
    gene_to_symbol=sashmi_plot_list(bamdir,eventsToVisualizeFilename,PSIFilename,events=events)
    return gene_to_symbol
Example 2: Sashimiplottting
def Sashimiplottting(bamdir,countsin,inputpsi,genelis):
    inputpsi = unique.filepath(inputpsi)
    text_file = open(inputpsi,'rU')
    lines = text_file.readlines()
    text_file.close()
    samp=sample(inputpsi)
    gene_label,gene_sym=genelist(inputpsi)
    header=True
    junction_max=[]
    countsin = unique.filepath(countsin)
    for line in open(countsin,'rU').xreadlines():
        data = cleanUpLine(line)
        t = string.split(data,'\t')
        if header:
            samples = t[1:]
            header=False
            exon_sum_array=[0]*len(samples)
            count_sum_array=[0]*len(samples)
        else:
            values = map(float,t[1:])
            count_sum_array = [sum(value) for value in zip(*[count_sum_array,values])]
    for i in range(len(samp)):
        sample_read[samp[i]]=count_sum_array[i]
        #print samp[i],sample_read[samp[i]]
    genelis = unique.filepath(genelis)
    sashmi_plot_list(bamdir,genelis,gene_label,lines,samp,gene_sym)
Example 3: retreiveAllKnownSpliceSites
def retreiveAllKnownSpliceSites():
    ### Uses a priori strand information when none present
    import export, unique
    chromosomes_found={}
    parent_dir = export.findParentDir(bam_file)
    for file in os.listdir(parent_dir):
        if 'AltAnalyze_report' in file and '.log' in file:
            log_file = unique.filepath(parent_dir+'/'+file)
            log_contents = open(log_file, "rU")
            species_tag = ' species: '
            for line in log_contents:
                line = line.rstrip()
                if species_tag in line:
                    species = string.split(line,species_tag)[1]
    splicesite_db={}
    refExonCoordinateFile = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
    firstLine=True
    for line in open(refExonCoordinateFile,'rU').xreadlines():
        if firstLine: firstLine=False
        else:
            line = line.rstrip('\n')
            t = string.split(line,'\t'); #'gene', 'exon-id', 'chromosome', 'strand', 'exon-region-start(s)', 'exon-region-stop(s)', 'constitutive_call', 'ens_exon_ids', 'splice_events', 'splice_junctions'
            geneID, exon, chr, strand, start, stop = t[:6]
            #start = int(start); stop = int(stop)
            #geneID = string.split(exon,':')[0]
            splicesite_db[chr,start]=strand
            splicesite_db[chr,stop]=strand
            if len(chr)<5 or ('GL0' not in chr and 'GL' not in chr and 'JH' not in chr and 'MG' not in chr):
                chromosomes_found[string.replace(chr,'chr','')] = []
    return splicesite_db,chromosomes_found
Example 4: importAgilentExpressionValues
def importAgilentExpressionValues(filename,array,channel_to_extract):
    """ Imports Agilent Feature Extraction files for one or more channels """
    print '.',
    red_expr_db={}
    green_expr_db={}
    parse=False
    fn=unique.filepath(filename)
    for line in open(fn,'rU').xreadlines():
        data = UI.cleanUpLine(line)
        if parse==False:
            if 'ProbeName' in data:
                headers = string.split(data,'\t')
                pn = headers.index('ProbeName')
                try: gc = headers.index('gProcessedSignal')
                except Exception: pass
                try: rc = headers.index('rProcessedSignal')
                except Exception: pass
                parse = True
        else:
            t = string.split(data,'\t')
            probe_name = t[pn]
            try: green_channel = math.log(float(t[gc])+1,2) #min is 0
            except Exception: pass
            try: red_channel = math.log(float(t[rc])+1,2) #min is 0
            except Exception: pass
            if 'red' in channel_to_extract:
                red_expr_db[probe_name] = red_channel
            if 'green' in channel_to_extract:
                green_expr_db[probe_name] = green_channel
    if 'red' in channel_to_extract:
        red_channel_db[array] = red_expr_db
    if 'green' in channel_to_extract:
        green_channel_db[array] = green_expr_db
Example 5: reimportFeatures
def reimportFeatures(featureFile):
    """ Import the exon and gene coordinates """
    gene_event_db={}
    featureFile = unique.filepath(featureFile)
    head=0
    for line in open(featureFile,'rU').xreadlines():
        #for k in range(len(strand['AltAnalyze_ID'])):
        if head ==0: head=1
        else:
            line = line.rstrip('\n')
            event=string.split(line,'\t')[0] #example event: ENSMUSG00000025915:E17.2-E17.5=chr1:9885753-9886047
            event = string.replace(event,':','__')
            event_split=string.split(event,'__')
            for i in range(len(event_split)):
                if "ENS" in event_split[i] or '00000' in event_split[i]:
                    if '-' in event_split[i]:
                        ji=string.split(event_split[i],'-')
                        gene=ji[1]
                    else:
                        gene=event_split[i]
            featureID,position = string.split(event,'=') ### store the feature (exon or junction) position and ID separately
            pd = PositionData(position)
            if gene in gene_event_db:
                feature_db = gene_event_db[gene]
                feature_db[featureID] = pd
            else:
                feature_db = {featureID:pd}
                gene_event_db[gene]=feature_db
    return gene_event_db
Example 6: extractFeatures
def extractFeatures(species,countinp):
    import export
    ExonsPresent=False
    if 'counts.' in countinp:
        feature_file = string.replace(countinp,'counts.','features.')
        fe = export.ExportFile(feature_file)
        firstLine = True
        for line in open(countinp,'rU').xreadlines():
            if firstLine: firstLine=False
            else:
                feature_info = string.split(line,'\t')[0]
                fe.write(feature_info+'\n')
                if ExonsPresent == False:
                    exon = string.split(feature_info,'=')[0]
                    if '-' not in exon:
                        ExonsPresent = True
        ### Add exon-info if necessary
        if ExonsPresent == False:
            exons_file = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
            firstLine = True
            for line in open(exons_file,'rU').xreadlines():
                if firstLine: firstLine=False
                else:
                    line = line.rstrip('\n')
                    t = string.split(line,'\t')
                    gene = t[0]
                    exon = t[1]
                    chr = t[2]
                    strand = t[3]
                    start = t[4]
                    end = t[5]
                    fe.write(gene+':'+exon+'='+chr+':'+start+'-'+end+'\n')
        fe.close()
    return feature_file
Example 7: genelist
def genelist(fname):
    fname = unique.filepath(fname)
    header=True
    for line in open(fname,'rU').xreadlines():
        line = line.rstrip(os.linesep)
        if header: header = False
        else:
            t=string.split(line,'\t')
            try:
                ### Re-order these to have the exclusion be listed first
                j1a,j1b = string.split(t[2],'-')
                j2a,j2b = string.split(t[3],'-')
                j1a = string.split(j1a,':')[1]
                j2a = string.split(j2a,':')[1]
                j1a = int(float(string.split(j1a,'.')[0][1:]))
                j1b = int(float(string.split(j1b,'.')[0][1:]))
                j2a = int(float(string.split(j2a,'.')[0][1:]))
                j2b = int(float(string.split(j2b,'.')[0][1:]))
                #print [j1a,j2a,j1b,j2b], t[2], t[3]
                if j1a>j2a or j1b<j2b:
                    val = t[2]+' '+t[3]
                else:
                    val=t[3]+' '+t[2]
            except Exception:
                #print traceback.format_exc();sys.exit()
                val=t[2]+' '+t[3]
                if '-' not in t[2]:
                    val = t[3]+' '+t[2]
            val = string.replace(val,":","__")
            lis.append(val)
            #print t[0]
    return lis
Example 8: importExpressionValues
def importExpressionValues(filename):
    """ Imports tab-delimited expression values"""
    header = True
    sample_expression_db = {}
    fn = unique.filepath(filename)
    for line in open(fn, "rU").xreadlines():
        data = UI.cleanUpLine(line)
        if header:
            sample_names = string.split(data, "\t")
            header = False
        else:
            exp_values = string.split(data, "\t")
            gene = exp_values[0]
            index = 1
            for value in exp_values[1:]:
                sample_name = sample_names[index]
                if sample_name in sample_expression_db:
                    gene_expression_db = sample_expression_db[sample_name]
                    gene_expression_db[gene] = value
                else:
                    gene_expression_db = {}
                    gene_expression_db[gene] = value
                    sample_expression_db[sample_name] = gene_expression_db
                index += 1
    return sample_expression_db
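As a usage illustration (the file name below is hypothetical), the returned structure maps each sample name to its own dictionary of gene IDs and expression-value strings:

# Hypothetical call: builds {sample name: {gene ID: expression value (string)}}.
sample_expression_db = importExpressionValues('exp.MyDataset.txt')
for sample_name in sample_expression_db:
    print sample_name, len(sample_expression_db[sample_name]), 'genes imported'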
Example 9: indexdic
def indexdic(fname):
    fname = unique.filepath(fname)
    head=0
    for line in open(fname,'rU').xreadlines():
        #for k in range(len(a['AltAnalyze_ID'])):
        if head ==0:
            head=1
            continue
        else:
            a=string.split(line,'\t')
            #p=a['AltAnalyze_ID'][k]
            p=a[0]
            j=string.split(p,':')
            #print j[0]
            for i in range(len(j)):
                if "ENS" in j[i]:
                    if '-' in j[i]:
                        ji=string.split(j[i],'-')
                        jj=ji[1]
                    else:
                        jj=j[i]
            #print jj,'first check'
            if jj in index_read:
                index_read[jj].append(p)
            else:
                index_read[jj]=[p,]
    return index_read
Example 10: importPSIJunctions
def importPSIJunctions(fname):
    All_PSI_Reciprocol_Junctions=[]
    fname = unique.filepath(fname)
    header=True
    for line in open(fname,'rU').xreadlines():
        line = line.rstrip(os.linesep)
        if header: header = False
        else:
            t=string.split(line,'\t')
            junction1 = t[2]
            junction2 = t[3]
            event1 = string.replace(junction1,":","__") ### first listed junction
            event2 = string.replace(junction2,":","__") ### second listed junction
            try:
                ### Re-order these to have the exclusion be listed first
                j1a,j1b = string.split(t[2],'-')
                j2a,j2b = string.split(t[3],'-')
                j1a = string.split(j1a,':')[1]
                j2a = string.split(j2a,':')[1]
                j1a = int(float(string.split(j1a,'.')[0][1:]))
                j1b = int(float(string.split(j1b,'.')[0][1:]))
                j2a = int(float(string.split(j2a,'.')[0][1:]))
                j2b = int(float(string.split(j2b,'.')[0][1:]))
                #print [j1a,j2a,j1b,j2b], t[2], t[3]
                if j1a>j2a or j1b<j2b:
                    event_pair = event1,event2
                else:
                    event_pair=event2,event1
            except Exception:
                #print traceback.format_exc();sys.exit()
                event_pair=event1,event2
                if '-' not in event1:
                    event_pair = event2,event1
            All_PSI_Reciprocol_Junctions.append(event_pair)
    return All_PSI_Reciprocol_Junctions
Example 11: genelist
def genelist(fname):
    fname = unique.filepath(fname)
    for line in open(fname,'rU').xreadlines():
        line = line.rstrip(os.linesep)
        t=string.split(line,'\t')
        val=t[2]+' '+t[3]
        lis.append(val)
        #print t[0]
    return lis
Example 12: extractFeatures
def extractFeatures(species,countsFileDir):
    import export
    ExonsPresent=False
    lastgene = None
    lastend = None
    genes_detected={}
    count=0
    first_last_exons = {} ### Make strand fake junction comprised of the first and last exon
    if 'counts.' in countsFileDir:
        ### The feature_file contains only ExonID or Gene IDs and associated coordinates
        feature_file = string.replace(countsFileDir,'counts.','features.')
        fe = export.ExportFile(feature_file)
        firstLine = True
        for line in open(countsFileDir,'rU').xreadlines():
            if firstLine: firstLine=False
            else:
                feature_info = string.split(line,'\t')[0]
                fe.write(feature_info+'\n')
                junction_annotation = string.split(feature_info,'=')[0]
                if '-' in junction_annotation:
                    geneid = string.split(junction_annotation,':')[0]
                    genes_detected[geneid]=[]
                if ExonsPresent == False:
                    exon = string.split(feature_info,'=')[0]
                    if '-' not in exon:
                        ExonsPresent = True
        ### Add exon-info if necessary
        exons_file = unique.filepath('AltDatabase/ensembl/'+species+'/'+species+'_Ensembl_exon.txt')
        firstLine = True
        for line in open(exons_file,'rU').xreadlines():
            if firstLine: firstLine=False
            else:
                line = line.rstrip('\n')
                t = string.split(line,'\t')
                gene,exon,chr,strand,start,end = t[:6]
                if gene!=lastgene:
                    if len(genes_detected)==0 or gene in genes_detected: ### restrict to detected genes
                        first_last_exons[gene,strand] = [(chr,start)]
                    if len(genes_detected)==0 or lastgene in genes_detected: ### restrict to detected genes
                        try: first_last_exons[lastgene,laststrand].append(lastend)
                        except Exception:
                            pass ### occurs for the first gene
                if ExonsPresent == False:
                    fe.write(gene+':'+exon+'='+chr+':'+start+'-'+end+'\n')
                lastgene = gene; lastend = end; laststrand = strand
        if len(genes_detected)==0 or lastgene in genes_detected:
            first_last_exons[lastgene,laststrand].append(lastend)
        ### Add strand fake junction for the whole gene
        for (gene,strand) in first_last_exons:
            (chr,start),end = first_last_exons[gene,strand]
            if strand == '-':
                start,end = end,start # Need to encode strand in this annotation, do this by strand orienting the positions
            fe.write(gene+':E1.1-E100.1'+'='+chr+':'+start+'-'+end+'\n')
        fe.close()
    return feature_file ### return the location of the exon and gene coordinates file
Example 13: filepath
def filepath(filename):
    try:
        import unique ### local to AltAnalyze
        fn = unique.filepath(filename)
    except Exception:
        ### Should work fine when run as a script with this (AltAnalyze code is specific for packaging with AltAnalyze)
        dir=os.path.dirname(dirfile.__file__)
        try: dir_list = os.listdir(filename); fn = filename ### test to see if the path can be found (then it is the full path)
        except Exception: fn=os.path.join(dir,filename)
    return fn
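For illustration (the argument is hypothetical, and dirfile is assumed to be a module imported elsewhere in the original script), the wrapper resolves the same relative name whether or not AltAnalyze's unique module can be imported:

# Hypothetical call: uses unique.filepath when available, otherwise falls back
# to a path relative to the directory of the dirfile module.
settings_path = filepath('Config/sashimi_plot_settings.txt')
print settings_path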
Example 14: update_plot_settings
def update_plot_settings(bamdir, group_psi_values, sample_headers):
    ### This function writes out the sample order, colors and sequence coverage for each BAM file for SashimiPlot
    bams = []
    sample_colors = []
    sample_coverage = []
    colors = [
        "red",
        "blue",
        "green",
        "grey",
        "orange",
        "purple",
        "yellow",
        "peach",
        "pink",
        "violet",
        "magenta",
        "navy",
    ]
    colors = colors * 300
    color_index = 0
    for group in group_psi_values:
        for index in group_psi_values[group]:
            g = sample_headers[index].replace(".bed", ".bam")
            bams.append('"' + g + '"')
            sample_colors.append('"' + colors[color_index] + '"')
            sample_coverage.append(str(int(sampleReadDepth[index])))
        color_index += 1  ### advance to the next color for the next group
    bams = string.join(bams, ",")
    sample_colors = string.join(sample_colors, ",")
    sample_coverage = string.join(sample_coverage, ",")
    export_pl = open(unique.filepath("Config/sashimi_plot_settings.txt"), "w")
    export_pl.write("[data]\n")
    export_pl.write("bam_prefix = " + bamdir + "\n")
    export_pl.write("bam_files =[" + bams + "]\n")
    export_pl.write("\n")
    export_pl.write("[plotting]")
    export_pl.write("\n")
    export_pl.write("fig_width = 7 \nfig_height = 7 \nintron_scale = 30 \nexon_scale = 4 \nlogged = False\n")
    export_pl.write("font_size = 6 \nbar_posteriors = False \nnyticks = 4 \nnxticks = 4 \n")
    export_pl.write("show_ylabel = False \nshow_xlabel = True \nshow_posteriors = False \nnumber_junctions = True \n")
    export_pl.write("resolution = .5 \nposterior_bins = 40 \ngene_posterior_ratio = 5 \n")
    export_pl.write("colors =[" + sample_colors + "]\n")
    export_pl.write("coverages =[" + sample_coverage + "]\n")
    export_pl.write('bar_color = "b" \nbf_thresholds = [0, 1, 2, 5, 10, 20]')
    export_pl.close()
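For reference, the Config/sashimi_plot_settings.txt file written by the function above has the following shape; the bam_prefix, BAM file names, colors, and coverage values here are purely illustrative:

[data]
bam_prefix = /path/to/bam/dir/
bam_files =["Sample1.bam","Sample2.bam"]

[plotting]
fig_width = 7
fig_height = 7
intron_scale = 30
exon_scale = 4
logged = False
font_size = 6
bar_posteriors = False
nyticks = 4
nxticks = 4
show_ylabel = False
show_xlabel = True
show_posteriors = False
number_junctions = True
resolution = .5
posterior_bins = 40
gene_posterior_ratio = 5
colors =["red","blue"]
coverages =[1500000,1800000]
bar_color = "b"
bf_thresholds = [0, 1, 2, 5, 10, 20]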
Example 15: searchDirectory
def searchDirectory(directory,var,secondary=None):
    directory = unique.filepath(directory)
    files = unique.read_directory(directory)
    for file in files:
        if var in file:
            if secondary== None:
                return directory+'/'+file
                break
            elif secondary in file:
                return directory+'/'+file
                break
    ### if all else fails
    return directory+'/'+file
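A hypothetical call (directory path and search patterns invented for illustration): the function returns the first file whose name contains the primary substring (and, when a secondary substring is supplied, both substrings), falling back to the last file listed in the directory.

# Hypothetical usage of searchDirectory: locate a counts file for the 'Hs' species code.
counts_file = searchDirectory('/data/run1/ExpressionInput', 'counts.', secondary='Hs')
print counts_file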