本文整理汇总了Python中subprocess.Popen.join方法的典型用法代码示例。如果您正苦于以下问题:Python Popen.join方法的具体用法?Python Popen.join怎么用?Python Popen.join使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类subprocess.Popen
的用法示例。
在下文中一共展示了Popen.join方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
def main():
    """Split an indexed BAM into per-chromosome BAMs, one worker process each.

    Reads the SAM header to enumerate reference sequences, then launches one
    ``do_chr`` worker per chromosome plus one ``do_un`` worker for unaligned
    reads, and waits for them all to finish.
    """
    parser = argparse.ArgumentParser(
        description="Split aligned reads into chromosomes",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input', help="sorted samtools indexed BAM file")
    # Default to the current directory: the original had no default, so
    # omitting -o crashed below with AttributeError on None.rstrip.
    parser.add_argument('-o', '--output', default='.', help="output directory")
    args = parser.parse_args()
    m = re.search(r'([^\/]+)\.bam$', args.input)
    if not m:
        sys.stderr.write("bam file must end in .bam\n")
        sys.exit(1)  # non-zero so callers can detect the failure
    basename = m.group(1)
    args.output = args.output.rstrip('/')
    if not os.path.exists(args.output):
        os.makedirs(args.output)
    # Enumerate reference sequences from the @SQ lines of the SAM header.
    # universal_newlines=True makes p.stdout yield str (not bytes) on py3,
    # which the str regex below requires.
    cmd = "samtools view -H " + args.input
    p = Popen(cmd.split(), stdout=PIPE, universal_newlines=True)
    chrs = []
    for line in p.stdout:
        m = re.match(r'@SQ\tSN:(\S+)', line)
        if not m:
            continue
        chrs.append(m.group(1))
    p.communicate()
    # One worker per chromosome, plus one worker for the unaligned reads.
    ps = [Process(target=do_chr, args=(chrom, args, basename,)) for chrom in chrs]
    ps.append(Process(target=do_un, args=(args, basename,)))
    for proc in ps:
        proc.start()
    for proc in ps:
        proc.join()
示例2: main
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
def main():
    """Stream a BAM/SAM through ``dostream``, multi-threaded when indexed.

    Multi-threading is only possible for a real BAM file with a ``.bai``
    index; stdin input or a missing index forces the single-thread path.
    """
    parser = argparse.ArgumentParser(
        description="",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input', help="Sorted bam (preferrably indexed) Use - for STDIN sam. If streaming in be sure to remove unmapped reads")
    parser.add_argument('--threads', type=int, default=1, help="use multiple threads the bam has been indexed. Order is not preserved.")
    args = parser.parse_args()
    # Default to single-thread; only go parallel when >1 thread was asked
    # for AND the input is an indexed file.  (The original also created a
    # Pool for --threads 0, which raises.)
    single_thread = True
    if args.threads > 1 and args.input != '-':
        if os.path.isfile(args.input + '.bai'):
            single_thread = False
        else:
            sys.stderr.write("Warning doing single thread because lacking index\n")
    chrs = None
    if args.input != '-':
        # Collect reference names from the @SQ header lines.
        chrs = set()
        cmd = 'samtools view -H ' + args.input
        # universal_newlines=True: py3 pipes yield bytes otherwise, and the
        # str regex below would never match.
        p = Popen(cmd.split(), stdout=PIPE, universal_newlines=True)
        for line in p.stdout:
            m = re.match(r'@SQ\s+SN:(\S+)\s+LN:\d+', line)
            if m:
                chrs.add(m.group(1))
        p.communicate()
    if single_thread:
        # Easy case: one stream, either stdin or a samtools view pipe
        # (-F 4 drops unmapped reads, -h keeps the header).
        if args.input == '-':
            dostream(sys.stdin)
        else:
            cmd = 'samtools view -F 4 -h ' + args.input
            p = Popen(cmd.split(), stdout=PIPE, universal_newlines=True)
            dostream(p.stdout)
            p.communicate()
    else:
        # One async worker per chromosome; results are emitted via the
        # printres callback as they complete (order not preserved).
        pool = Pool(processes=args.threads)
        for chrom in sorted(chrs):
            pool.apply_async(dofilestream, args=(args.input, chrom), callback=printres)
        pool.close()
        pool.join()
示例3: main
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
def main():
    """Genotype a sorted/indexed BAM in fixed-size windows, optionally in parallel.

    Reads reference lengths from the BAM header, partitions each reference
    into ``itersize``-bp windows, and runs ``get_region_vcf`` per window,
    feeding results to ``do_output``.
    """
    parser = argparse.ArgumentParser(description="Takes a BAM file preferably one already filtered to be uniquely mapped reads.")
    parser.add_argument('input_fasta', help="FASTAFILE indexed")
    parser.add_argument('input_sorted_bam', help="BAMFILE sorted indexed")
    parser.add_argument('--threads', type=int, default=multiprocessing.cpu_count(), help="Number of threads defautl cpu_count")
    parser.add_argument('--include_multiply_mapped_reads', action='store_true', help="Include multiply mapped reads that are excluded by default. Note that this feature is not complete as it is with the 256 sam filter. it will only remove secondary alignments while still leaving the multiply mapped primary alignments. To only use uniquely mapped reads you need to pre-filter on unique and start from that indexed bam.")
    parser.add_argument('--include_indels', action='store_true', help="By default only SNPs and only loci with multiple genotypes are output. This will output indels.")
    parser.add_argument('--consensus', action='store_true', help="Use the original caller")
    args = parser.parse_args()
    # Read reference lengths from the @SQ SN/LN fields of the BAM header.
    # universal_newlines=True keeps the stream as str on py3 so the regex
    # matches; communicate() reaps the child (the original never did).
    hdr = Popen(('samtools view -H ' + args.input_sorted_bam).split(),
                stdout=PIPE, universal_newlines=True)
    chromlens = {}
    for line in hdr.stdout:
        m = re.match(r'@SQ\s+SN:(\S+)\s+LN:(\d+)', line.rstrip())
        if not m:
            continue
        chromlens[m.group(1)] = int(m.group(2))
    hdr.communicate()
    # Total window count, published for progress reporting elsewhere.
    # (The original duplicated the whole window loop just to count.)
    itersize = 10000000
    global gtotal
    gtotal = sum(len(range(1, chromlens[c], itersize)) for c in chromlens)
    pool = multiprocessing.Pool(processes=args.threads) if args.threads > 1 else None
    for chrom in chromlens:
        for rstart in range(1, chromlens[chrom], itersize):
            # Clamp the window end to the reference length.
            rend = min(rstart + itersize - 1, chromlens[chrom])
            if pool is None:
                do_output(get_region_vcf(args, chrom, rstart, rend))
            else:
                pool.apply_async(get_region_vcf, args=(args, chrom, rstart, rend), callback=do_output)
    if pool is not None:
        pool.close()
        pool.join()
示例4: MMPSimpleRaytracer
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
#.........这里部分代码省略.........
:param int stageID: optional argument identifying
solution stage (default 0)
:param bool runInBackground: optional argument, defualt False.
If True, the solution will run in background (in separate thread
or remotely).
"""
# Check that old simulation is not running:
if not self.isSolved():
return
# Set current tstep
self._curTStep = tstep
# This thread will callback when tracer has ended.
self.postThread = threading.Thread(target=self._runCallback,
args=(self.tracerProcess,
self._tracerProcessEnded))
# Post processing thread will wait for the tracer to finnish
self.postThread.start()
# Wait for process if applicaple
if not runInBackground:
self.wait()
def wait(self):
    """Block until a background solve has completed.

    Waits first for the external tracer process to exit, then for the
    post-processing thread to join.
    """
    tracer, post = self.tracerProcess, self.postThread
    print("Waiting...")
    tracer.wait()
    print("Post processing...")
    post.join()
    print("Post processing done!")
def isSolved(self):
    """
    :return: Returns true or false depending whether solve has completed
        when executed in background.
    :rtype: bool
    """
    # The tracer subprocess is finished once poll() reports a return code.
    if self.tracerProcess.poll() is not None:
        return True
    # BUG FIX: threading.Thread has no poll(); the original raised
    # AttributeError whenever the tracer was still running.  A finished
    # post-processing thread is detected with is_alive() instead.
    if not self.postThread.is_alive():
        return True
    return False
def getCriticalTimeStep(self):
    """
    :return: Returns the actual (related to current state) critical time
        step increment
    :rtype: float
    """
    # TODO: Check
    # Tracer does not have any time dependence, so any step size is safe.
    # np.inf replaces np.Infinity, which was removed in NumPy 2.0
    # (same float value, so callers are unaffected).
    return np.inf
def getAPIVersion(self):
    """
    :return: Returns the supported API version
    :rtype: str, int
    """
    # TODO: API version support?? How
    version_name = '1.0'
    version_number = 1
    return (version_name, version_number)
示例5: MMPRaytracer
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
#.........这里部分代码省略.........
self._writeInputJSON(tstep)
# Get mie data from other app
self._getMieData(tstep)
# Start thread to start calculation
self.tracerProcess = Popen( # ["ping", "127.0.0.1", "-n",
# "3", "-w", "10"],
# self.tracerProcess = Popen(["tracer",
# "DefaultLED.json"],
["tracer-no-ray-save", "input.json"],
stdout=PIPE,
stderr=PIPE)
# This thread will callback when tracer has ended.
self.postThread = threading.Thread(target=self._runCallback,
args=(self.tracerProcess,
self._tracerProcessEnded))
# Post processing thread will wait for the tracer to finnish
logger.info('Ray tracing starting...')
self.postThread.start()
# Wait for process if applicaple
if not runInBackground:
self.wait()
def wait(self):
    """Block until a background solve has completed.

    Waits for the external tracer process to exit and then joins the
    post-processing thread, logging progress at debug level.
    """
    logger.debug("Tracing...")
    process = self.tracerProcess
    process.wait()
    logger.debug("Post processing...")
    thread = self.postThread
    thread.join()
    logger.debug("Post processing done!")
def isSolved(self):
    """
    :return: Returns true or false depending whether solve has completed
        when executed in background.
    :rtype: bool
    """
    # Tracer subprocess done => poll() returns its exit code.
    if self.tracerProcess.poll() is not None:
        return True
    # BUG FIX: the original called self.postThread.poll(), but
    # threading.Thread has no poll() method, raising AttributeError while
    # the tracer was still running.  Check thread completion via is_alive().
    if not self.postThread.is_alive():
        return True
    return False
def getCriticalTimeStep(self):
    """
    :return: Returns the actual (related to current state) critical time
        step increment
    :rtype: float
    """
    # TODO: Check
    # The tracer has no time dependence; report an unbounded step.
    # np.Infinity was removed in NumPy 2.0 — np.inf is the same value
    # under the supported name.
    return np.inf
def getAPIVersion(self):
    """
    :return: Returns the supported API version
    :rtype: str, int
    """
    # TODO: API version support?? How
    api = ('1.0', 1)
    return api
def getApplicationSignature(self):
示例6: writeToLog
# 需要导入模块: from subprocess import Popen [as 别名]
# 或者: from subprocess.Popen import join [as 别名]
writeToLog(logfile,"** Generating new *.xml files **")
i = 0
# Loop through all inpaths and outfiles
while i <= (len(outfiles)-threadCount):
# Case where full threads are used
threads = threadCount
# Create queue and pool variables
queue0 = manager0.Queue() ; pool = []
for n in range(threads):
p = Process(target=xmlWrite,args=(outfiles_paths[i+n],outfiles[i+n],host_path,cdat_path,start_time,queue0))
p.start() ; pool.append(p)
# Wait for processes to terminate
for p in pool:
p.join()
# Get data back from queue object
inpaths = [] ; outfileNames = [] ; fileZeros = [] ; fileWarnings = [] ; fileNoReads = [] ; fileNoWrites = [] ; fileNones = [] ; errorCodes = [] ; time_since_starts = []
while not queue0.empty():
[inpath,outfileName,fileZero,fileWarning,fileNoRead,fileNoWrite,fileNone,errorCode,time_since_start] = queue0.get_nowait()
inpaths.append(inpath)
outfileNames.append(outfileName)
fileZeros.append(fileZero)
fileWarnings.append(fileWarning)
fileNoReads.append(fileNoRead)
fileNoWrites.append(fileNoWrite)
fileNones.append(fileNone)
errorCodes.append(errorCode)
time_since_starts.append(time_since_start)