This page compiles typical usage examples of the Python method General.removePath. If you have been wondering what General.removePath does, how to call it, or what working examples look like, the hand-picked code examples below may help. You can also explore more usage examples from General, the module in which this method lives.
A total of 12 code examples of General.removePath are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
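The General module itself is not reproduced on this page. Judging only from how its helpers are called in the examples below, removePath appears to strip the directory portion of a path, getBase to drop the file extension, and changeExt to swap the extension. The sketch below is an inferred approximation of that behavior (not the actual library code), offered only to make the examples easier to read:

# Hypothetical re-implementation of the General helpers, inferred from their usage below.
import os

def removePath(path):
    # '/data/frag/1abc_A12.pdb' -> '1abc_A12.pdb'
    return os.path.basename(path.strip().rstrip('/'))

def getBase(path):
    # '/data/frag/1abc_A12.pdb' -> '/data/frag/1abc_A12'
    return os.path.splitext(path.strip())[0]

def changeExt(path, ext):
    # 'query.match', 'seq' -> 'query.seq'
    return os.path.splitext(path)[0] + '.' + ext

For instance, removePath(getBase('/data/frag/1abc_A12.pdb')) would give '1abc_A12' (a made-up path), which matches how Example 2 below derives a fragment's base name before reading the residue number out of it.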
Example 1: parsePDB
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
# prepare starting conformation
p = parsePDB(args.p, chain = 'A') # assume the receptor is chain A
# parse the range
rangeS = []
for r in args.range:
    r = r.split('-')
    (start, end) = (r[0], r[-1])
    end = str(int(end)+1)
    rangeS.append(start +':'+end)
rangeS = ' '.join(rangeS)
pocket = p.select('resnum '+ rangeS)
writePDB('_pocket.pdb', pocket.copy()) # binding pocket
# use master to create pds files
Master.createPDS('_pocket.pdb')
tem = '_'+General.removePath(args.m)
os.system('cp '+args.m +' '+tem)
Master.createPDS(tem, query= False)
# search pocket in the template
Master.masterSearch('_pocket.pds', str.replace(tem, 'pdb', 'pds'), bbrmsd = False, topN =1, rmsdcut = 5.0)
# generate full match file for the template
Master.matchInFile('_pocket.pds', '__pocket.match', '_match', 'full')
ptem = parsePDB('_match/full1.pdb') # this is the superimposed template structure
assert ptem.numChains() == 2
chains = []
for chain in ptem.iterChains():
    chains.append(chain)
if chains[0].numResidues() < chains[1].numResidues(): # identify the peptide chain
    pepchain = chains[0]
else:
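A quick note on the range parsing in Example 1: ProDy's resnum start:end selection appears to be end-exclusive, like a Python range, which is presumably why 1 is added to the end residue. A tiny standalone illustration with made-up --range values:

# Hypothetical input: --range 45-60 70-75
args_range = ['45-60', '70-75']
rangeS = []
for r in args_range:
    r = r.split('-')
    start, end = r[0], r[-1]
    rangeS.append(start + ':' + str(int(end) + 1))
print(' '.join(rangeS))  # '45:61 70:76', i.e. residues 45-60 and 70-75 inclusive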
Example 2: len
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
if len(sys.argv) -1 != 2:
    print '<usage> [designscore or abundance][a head of files]'
    exit(0)
score, head = sys.argv[1:]
dirs = [x for x in os.listdir('.') if os.path.isdir(x)]
dirs.sort()
for d in dirs:
    outf = d + '/' + score + '.' + head + '.info'
    outfh = open(outf, 'w')
    outfh.write('\t'.join(['residue_number', 'number_of_hits', 'number_of_contacts', 'number_of_residues'])+'\n')
    lists = glob.glob(d + '/fragments/*.list')
    lists.sort()
    info = []
    for lst in lists:
        base = General.removePath(General.getBase(lst))
        resnum = base.split('_')[-1][1:]
        ncon = len(open(lst).readlines())
        pdb = d + '/fragments/' + base + '.pdb'
        nres = len(PDB.ConRes(pdb))
        seqf = d + '/' + score + '/' + head + '_' + base + '.seq'
        nhit = len(open(seqf).readlines())
        record = [resnum, nhit, ncon, nres]
        info.append(record)
    info_sort = sorted(info, key = lambda x :int(x[0]))
    for i_s in info_sort:
        outstr = '\t'.join(map(str, i_s)) + '\n'
        outfh.write(outstr)
    outfh.close()
Example 3: list
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
par.add_argument('--o', help = 'specify another location for the output')
par.add_argument('--dontuse', help = 'a file of strings; database targets whose paths contain any of these strings are excluded from the result; examples in /home/anthill/fzheng/home/termanal_upgrade/caspHomoStrict/')
par.add_argument('--env', action = 'store_true', help = 'whether to use the environment to adjust designscore')
par.add_argument('--minhits', type = int, help = 'the minimum number of hits required for each term')
par.add_argument('--post', action = 'store_true', help = 'if true, indicates post analysis: skip all searching steps and do scoring directly')
par.add_argument('--opdb', action = 'store_true', help = 'if true, output three PDB files in which the B-factor is replaced by scores')
par.add_argument('--inter', nargs = '+', help = 'if not None, the search is restricted to interface residues; if not all interfaces are needed, provide tuples such as "AB", "C", meaning the interface between A and B is not needed but interfaces between C and anything else are needed')
args = par.parse_args()
### preparing the program ###
from functions import *
SEARCHDB = '/home/anthill/fzheng/ironfs/searchDB/bc-30-sc-20141022'
# SEARCHDB = '/home/anthill/fzheng/ironfs/designScore/bc-30-sc-20140815'
dbname = General.removePath(SEARCHDB)
sub.call(['perl', '-w', '/home/grigoryanlab/library/MASTER/scripts/copyDBLocally.pl', '-n', dbname, '-l', SEARCHDB + '/list'])
ROTLIB = args.d + '/rotlib/RR2000.rotlib'
MASTER = args.m
CONFIND = args.c
pdbf = args.p
outdir = os.path.dirname(os.path.realpath(pdbf))
base = os.path.splitext(os.path.basename(pdbf))[0]
if args.inter != None:
    args.inter = [ list(x) for x in args.inter]
# no matter what the current directory is, create a symlink called 'termanal_support' to 'support.default'.
term_support = 'termanal_support'
if not os.path.exists(term_support):
Example 4: open
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
import General
from functions import *
par = argparse.ArgumentParser()
# par.add_argument('--p', required = True, help = 'a .pdb file')
par.add_argument('--dd', required = True, help = 'a directory with data; requires existence of fragment/, designscore/ and abundance/')
par.add_argument('--cut', type = int, help = 'a cutoff determining how many hits are "enough"')
par.add_argument('--h', nargs = 2, default = ['uniq_t1k', 'uniq_t1k'], help = 'the head of .seq files')
par.add_argument('--nh', required = True, help = 'head added to original files')
par.add_argument('--rmsd', default = '10.0', help = 'an rmsd cutoff, usually very loose to make sure enough hits can be found')
par.add_argument('--dontuse', help = 'target to exclude from search')
args = par.parse_args()
Master = '/home/anthill/fzheng/home/scripts/termanal_updating/'
SEARCHDB = '/home/searchDB/bc-30-sc-20141022/'
dbname = General.removePath(SEARCHDB)
sub.call(['perl', '-w', '/home/grigoryanlab/library/MASTER/scripts/copyDBLocally.pl', '-n', dbname, '-l', SEARCHDB + '/list'])
FRAGMENTS_OUT = args.dd + '/fragments/'
DESIGNSCORE_OUT = args.dd + '/designscore/'
ABUNDANCE_OUT = args.dd + '/abundance/'
# modify list according to homo, if needed
list_std = '/data/scratch/grigoryanlab/localDBs/' + dbname + '/list'
list_tmp = 'list_temp_termanal.out.' + General.removePath(args.dd)
if args.dontuse != None:
    homo = open(args.dontuse).readlines()
    homo = [x.strip() for x in homo]
    list_tmpfh = open(list_tmp, 'w')
    n_excluded = 0
    for line_in_list in open(list_std):
Example 5: str
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
if os.path.isfile(outname):
    continue
if outname in seen:
    continue
seen[outname] = 1
# for the gap test only
if args.wgap != None:
    pdb = args.wgap + '/' + mut.dir + '/' + pdb
##
pos = PDB.findPositionInPDB(pdb, str(mut.n), mut.c)
if pos == -1:
    print('cannot find the residue in fragment pdb: '+ pdb)
    continue
pdb = General.removePath(pdb)
cmd = ['python', selfbin +'/envForMatches.py','--m', matchf, '--n', str(pos-1), '--o', outname]
if args.uplimit != None:
    cmd.extend(['--uplimit', args.uplimit])
if args.wgap != None:
    cmd.append('--wgap')
cmd = ' '.join(cmd)
job = General.jobOnCluster([cmd], mut.dir, os.path.realpath(outname))
job.submit(3)
jobs.append(job)
sleep(0.5)
os.chdir(odir)
while (len(jobs) > 0):
    sleep(120)
Example 6: open
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
par.add_argument('--sl', help = 'use a searchDB list file')
par.add_argument('--o', required = True, help = 'name of the output file')
args = par.parse_args()
out = open(args.o, 'w')
def outputSeq(seqs, name, out, chains = None):
    if (chains != None) and (not isinstance(chains, list)):
        chains = list(chains)
    keys = seqs.keys()
    keys.sort()
    for k in keys:
        if (chains != None) and (k not in chains):
            continue
        out.write('>' + name + '_' + k + '\n')
        out.write(seqs[k]+'\n')
if args.sl == None:
    for l in open(args.pl):
        pid, cid = l.strip().split('_')
        p = pid.lower() + '.clean.pdb'
        seqs = PDB.pdb2seq(p)
        outputSeq(seqs, pid.lower(), out, cid)
else:
    for l in open(args.sl):
        p = General.changeExt(l.rstrip('\n'), 'pdb')
        seqs = PDB.pdb2seq(p)
        name = General.removePath(p).split('.')[0]
        outputSeq(seqs, name, out)
out.close()
Example 7: open
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
__author__ = 'fanzheng'
import argparse, os
import numpy as np
import PDB, General
par = argparse.ArgumentParser()
par.add_argument('--s', required = True, help = 'the intact structure')
par.add_argument('--cid', help = 'if not None, specify a chain ID')
par.add_argument('--weights', nargs = '+', type = float, help = 'the weighting factor of terms')
par.add_argument('--o', required = True, help = 'the output file')
par.add_argument('--ext', required = True, help = 'the extension of scoring files')
par.add_argument('--t', default=1.0, type = float, help = 'temperature')
args = par.parse_args()
pid = General.removePath(args.s).split('.')[0]
residues = PDB.ConRes(args.s)
if args.cid != None:
    residues = [x for x in residues if x.getChid() == args.cid]
outf = open(args.o, 'w')
weights = np.array(args.weights)
outf.write('#weights:' + '\t' + '\t'.join([str(x) for x in weights])+'\n')
aatypes = 'A C D E F G H I K L M N P Q R S T V W Y'
outf.write(aatypes + '\n')
for i in range(1, len(residues)-1):
    res = residues[i]
    resid = res.getChid() + str(res.getResnum())
    scf = pid + '_' + resid + '.' + args.ext
    if not os.path.isfile(scf):
Example 8: open
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
import os, sys
import General
inp = sys.argv[1]
# read fasta of casp files
fa = '/home/anthill/fzheng/home/termanal_upgrade/caspModels.fa'
fals = open(fa).readlines()
length = {}
db1 = {}
db1list = '/home/anthill/fzheng/ironfs/designScore/bc-30-sc-20140815/list'
for l in open(db1list):
    pid = General.getBase( General.removePath(l) )
    db1[pid] = 1
db2 = {}
db2list = '/home/anthill/fzheng/home/searchDB/bc-30-sc-20141022-newpds/list'
for l in open(db2list):
    pid = General.getBase( General.removePath(l) )
    db2[pid] = 1
for i in range(len(fals)):
    if fals[i].startswith('>'):
        qid = fals[i][1:].split('.')[0]
        length[qid] = len(fals[i+1].strip())
for inpl in open(inp):
    items = inpl.strip().split()
    qid = items[0].split('.')[0]
    items[0] = qid
Example 9: open
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
database_path = '/home/anthill/fzheng/home/searchDB/support_bc-30-sc-correct-20141022/others'
odir = os.getcwd()
ldir = General.createLocalSpace()
outfh = open(ldir + '/' + args.o, 'w')
uplimit = args.uplimit
nseq = 0
for match_line in open(args.m):
    if (uplimit != None) and (nseq == uplimit):
        break
    match_line = match_line.strip()
    indices = Analyze.index_from_match(match_line)
    index1, index2 = indices[args.n[0]], indices[args.n[1]]
    target_pds = match_line.split()[1]
    targetid = General.getBase( General.removePath(match_line.split()[1]) )
    env_dict = database_path + '/' + targetid[1:3] + '/' + targetid + '.freedom.db'
    db = shelve.open(env_dict, 'r')
    # extract post-processed pdb files from target_pds
    resfile = database_path + '/' + targetid[1:3] + '/' + General.changeExt( General.removePath(target_pds), 'post.res')
    allres = open(resfile).read().splitlines()
    resid1, resid2 = allres[index1], allres[index2]
    resid1, resid2 = resid1[0] + ',' + resid1[1:], resid2[0] + ',' + resid2[1:]
    fields = ['sumcond', 'crwdnes', 'freedom', 'phi', 'psi', 'aa']
    outfh.write(targetid + '\t')
    if not resid1 in db:
        outfh.write('\t'.join([resid1] + ['NA' for x in range(len(fields))]))
    else:
        res_info = db[resid1]
        outfields = [str(res_info[x]) if x in res_info else 'NA' for x in fields]
Example 10:
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
par.add_argument('--o', help = 'specify another location for the output')
par.add_argument('--dontuse', help = 'a file of strings; database targets whose paths contain any of these strings are excluded from the result; examples in /home/anthill/fzheng/home/termanal_upgrade/caspHomoStrict/')
par.add_argument('--env', action = 'store_true', help = 'whether to use the environment to adjust designscore')
par.add_argument('--opdb', action = 'store_true', help = 'if true, output three PDB files in which the B-factor is replaced by scores')
par.add_argument('--raw', action = 'store_true', help = 'if true, also save an unsmoothed version of the scores')
par.add_argument('--ext', nargs = 3, default = ['dsc50', 'abd50', 'ssc50'], help = 'extension of output files')
par.add_argument('--hrs', type = int, default = 3, help = 'time allocated on the anthill cluster')
args = par.parse_args()
timestart = time.time()
### preparing the program ###
from functions import *
SEARCHDB = '/home/anthill/fzheng/ironfs/searchDB/bc-30-sc-20141022' # for convenience on our own system
dbname = General.removePath(SEARCHDB)
sub.call(['perl', '-w', '/home/grigoryanlab/library/MASTER/scripts/copyDBLocally.pl', '-n', dbname, '-l', SEARCHDB + '/list'])
ROTLIB = args.d + '/rotlib/RR2000.rotlib'
MASTER = args.m
CONFIND = args.c
pdbf = args.p
outdir = os.path.dirname(os.path.realpath(pdbf))
base = os.path.splitext(os.path.basename(pdbf))[0]
# no matter what the current directory is, create a symlink called 'termanal_support' to 'support.default'.
term_support = 'termanal_support'
if not os.path.exists(term_support):
    os.system('ln -s ' + args.d + ' ' + term_support)
# if another location is set for the output
Example 11: len
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
import os, sys
import General, PDB
if len(sys.argv) - 1 != 2:
    print '<usage> [list pdb file] [output .fa file]'
    exit(0)
lst, fasta = sys.argv[1:]
out = open(fasta, 'w')
for l in open(lst):
    pdbf = l.strip()
    name = General.removePath(pdbf)
    seqs = PDB.pdb2seq(pdbf)
    out.write('>'+pdbf+'\n')
    for c in seqs: # because only single chain
        out.write(seqs[c]+'\n')
Example 12: len
# Required module import: import General [as alias]
# Or: from General import removePath [as alias]
# output file names
nr_matchf = args.outh + '_' + args.m
nr_seqf = General.changeExt(nr_matchf, 'seq')
nr_env = None
oenv = General.changeExt(args.m, args.env)
if os.path.isfile(oenv):
    nr_env = General.changeExt(nr_matchf, args.env)
# write a custom .fasta file
for match in matches:
    match_region_indices = Analyze.index_from_match(match)
    central_index = match_region_indices[args.cres - 1]
    match_id = General.getBase( General.removePath( match.split()[1] ) )
    fullsequence = database[match_id]
    if central_index - args.wd < 1:
        seqcontext = fullsequence[0:(2 * args.wd + 1)]
    elif central_index + args.wd > len(fullsequence):
        seqcontext = fullsequence[-(2 * args.wd + 1):]
    else:
        seqcontext = fullsequence[(central_index - args.wd - 1):(central_index + args.wd)]
    tempfh.write('>match:'+str(matchind)+'\n'+seqcontext+'\n')
    if args.conres != None:
        con_index = match_region_indices[args.conres -1]
        if con_index - args.wd < 1:
            seqcontext = fullsequence[0:(2 * args.wd + 1)]
        elif con_index + args.wd > len(fullsequence):
            seqcontext = fullsequence[-(2 * args.wd + 1):]
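The window extraction in Example 12 can be summarized as a small standalone helper. This is only a paraphrase of the snippet above (same 1-based center index and 2*wd+1 window width), not part of the original script:

def seq_window(fullsequence, center, wd):
    # Return a window of 2*wd+1 characters around the 1-based position `center`,
    # clamped to the start or end of the sequence when the window would overrun it.
    width = 2 * wd + 1
    if center - wd < 1:                        # would run off the left edge
        return fullsequence[0:width]
    elif center + wd > len(fullsequence):      # would run off the right edge
        return fullsequence[-width:]
    else:
        return fullsequence[(center - wd - 1):(center + wd)]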