本文整理汇总了Python中pbcore.io.AlignmentSet.close方法的典型用法代码示例。如果您正苦于以下问题:Python AlignmentSet.close方法的具体用法?Python AlignmentSet.close怎么用?Python AlignmentSet.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pbcore.io.AlignmentSet
的用法示例。
在下文中一共展示了AlignmentSet.close方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_loadmetadata_from_dataset_create_cli
# 需要导入模块: from pbcore.io import AlignmentSet [as 别名]
# 或者: from pbcore.io.AlignmentSet import close [as 别名]
def test_loadmetadata_from_dataset_create_cli(self):
    """Check that 'dataset create --metadata' injects collection
    metadata into an AlignmentSet that previously had none."""
    src_xml = tempfile.NamedTemporaryFile(suffix=".alignmentset.xml").name
    out_xml = tempfile.NamedTemporaryFile(suffix=".alignmentset.xml").name
    log.debug(src_xml)
    # Strip the collection metadata and persist a stripped copy to disk.
    aset = AlignmentSet(data.getXml(8))
    aset.metadata.collections = None
    aset.copyTo(src_xml)
    aset.close()
    del aset
    self.assertTrue(os.path.exists(src_xml))
    # Confirm the on-disk copy really lost its collections.
    aset = AlignmentSet(src_xml)
    self.assertFalse(aset.metadata.collections)
    # Re-create the dataset, pulling metadata from a reference subreadset.
    command = "dataset create --metadata {m} {o} {i}".format(
        o=out_xml,
        i=src_xml,
        m=("/pbi/dept/secondary/siv/testdata/"
           "SA3-Sequel/lambda/roche_SAT/"
           "m54013_151205_032353.subreadset.xml"))
    log.debug(command)
    stdout, rcode, errmsg = backticks(command)
    self.assertEqual(rcode, 0, errmsg)
    # The freshly created dataset should now carry collection metadata.
    aset = AlignmentSet(out_xml)
    self.assertTrue(aset.metadata.collections)
示例2: test_membership_filter
# 需要导入模块: from pbcore.io import AlignmentSet [as 别名]
# 或者: from pbcore.io.AlignmentSet import close [as 别名]
def test_membership_filter(self):
    """Exercise the 'in' membership filter with hole numbers and query
    names supplied in several container types (ndarray, list, set)."""

    def fresh_aset():
        # Open a new, unfiltered AlignmentSet and sanity-check the
        # full record count before any filter is applied.
        aset = AlignmentSet(data.getXml(12))
        self.assertEqual(len(list(aset)), 177)
        return aset

    # ndarray holding a single hole number restricts the set.
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)[:1]
    aset.filters.addRequirement(zm=[('in', zmws)])
    self.assertEqual(len(list(aset)), 5)

    # All hole numbers: nothing is filtered out.
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)
    aset.filters.addRequirement(zm=[('in', zmws)])
    self.assertEqual(len(list(aset)), 177)

    # A very large (heavily duplicated) membership array still works.
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)
    zmws = [n for _ in range(10000) for n in zmws]
    zmws = np.array(zmws)
    aset.filters.addRequirement(zm=[('in', zmws)])
    self.assertEqual(len(list(aset)), 177)

    # A plain Python list works too.
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)[:1]
    zmws = list(zmws)
    aset.filters.addRequirement(zm=[('in', zmws)])
    self.assertEqual(len(list(aset)), 5)

    # As does a set.
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)[:1]
    zmws = set(zmws)
    aset.filters.addRequirement(zm=[('in', zmws)])
    self.assertEqual(len(list(aset)), 5)

    # Membership filtering on query names instead of hole numbers.
    aset = fresh_aset()
    names = [rec.qName for rec in aset[:10]]
    aset.filters.addRequirement(qname=[('in', names)])
    self.assertEqual(len(list(aset)), 10)

    # The filter survives a write/read round trip.
    out_fn = tempfile.NamedTemporaryFile(suffix="alignmentset.xml").name
    aset = fresh_aset()
    zmws = np.unique(aset.index.holeNumber)[:1]
    aset.filters.addRequirement(zm=[('in', zmws)])
    aset.write(out_fn)
    aset.close()
    reopened = AlignmentSet(out_fn)
    self.assertEqual(len(list(reopened)), 5)
示例3: ToolRunner
# 需要导入模块: from pbcore.io import AlignmentSet [as 别名]
# 或者: from pbcore.io.AlignmentSet import close [as 别名]
#.........这里部分代码省略.........
self._workQueue.put(chunk)
# Write sentinels ("end-of-work-stream")
for i in xrange(options.numWorkers):
self._workQueue.put(None)
def _printProfiles(self):
    # Report the 20 most expensive entries (sorted by internal time)
    # from every profile dump found in the temporary directory.
    pattern = os.path.join(options.temporaryDirectory, "*")
    for profilePath in glob.glob(pattern):
        pstats.Stats(profilePath).sort_stats("time").print_stats(20)
def _cleanup(self):
    # Only profiling runs create a temporary directory to remove.
    if not options.doProfiling:
        return
    logging.info("Removing %s" % options.temporaryDirectory)
    shutil.rmtree(options.temporaryDirectory, ignore_errors=True)
def _setupEvidenceDumpDirectory(self, directoryName):
if os.path.exists(directoryName):
shutil.rmtree(directoryName)
os.makedirs(directoryName)
@property
def aborting(self):
    # True once abortWork() has set the abort flag for this run.
    return self._aborting
def abortWork(self, why):
    """
    Shut down all the slave processes after a failure.

    Invoked by the monitoring thread when a child process exits with a
    non-zero status, or on a keyboard interrupt (Ctrl-C).  Never part
    of a normal shutdown.
    """
    logging.error(why)
    self._aborting = True
    # Closing both queues lets anything blocked on them wind down.
    for queue in (self._resultsQueue, self._workQueue):
        queue.close()
@property
def slaves(self):
    # The collection of slave workers spawned for this run.
    return self._slaves
def main(self):
    """Program entry point: parse options, select the algorithm,
    configure logging, and log version information before the run.

    Profiling setup (a temporary directory) only happens when
    profiling was requested on the command line.
    """
    # This looks scary but it's not.  Python uses reference
    # counting and has a secondary, optional garbage collector for
    # collecting garbage cycles.  Unfortunately when a cyclic GC
    # happens when a thread is calling cPickle.dumps, the
    # interpreter crashes sometimes.  See Bug 19704.  Since we
    # don't leak garbage cycles, disabling the cyclic GC is
    # essentially harmless.
    gc.disable()
    parseOptions()
    # Map the algorithm name from the CLI onto its implementation.
    self._algorithm = self._algorithmByName(options.algorithm)
    self._setupLogging()
    # Fixed seed so runs are reproducible.
    random.seed(42)
    logging.info("h5py version: %s" % h5py.version.version)
    logging.info("hdf5 version: %s" % h5py.version.hdf5_version)
    logging.info("ConsensusCore version: %s" %
                 (consensusCoreVersion() or "ConsensusCore unavailable"))
    logging.info("Starting.")
    # Guarantee temporary-state cleanup on interpreter exit.
    atexit.register(self._cleanup)
    if options.doProfiling:
        self._makeTemporaryDirectory()
示例4: KineticsToolsRunner
# 需要导入模块: from pbcore.io import AlignmentSet [as 别名]
# 或者: from pbcore.io.AlignmentSet import close [as 别名]
#.........这里部分代码省略.........
logging.info("Using Chemistry matched IPD model: %s" % ipdModel)
self.ipdModel = IpdModel(contigs, ipdModel, self.args.modelIters)
def loadSharedAlignmentSet(self, cmpH5Filename):
    """
    Open the input AlignmentSet up front so its indices can be shared
    with the slaves; the open set is also handed to ReferenceUtils
    when the ipdModel object is set up.
    """
    refFasta = self.args.reference
    logging.info("Reading AlignmentSet: %s" % cmpH5Filename)
    logging.info(" reference: %s" % refFasta)
    self.alignments = AlignmentSet(cmpH5Filename,
                                   referenceFastaFname=refFasta)
    # XXX this should ensure that the file(s) get opened, including any
    # .pbi indices - but need to confirm this
    self.refInfo = self.alignments.referenceInfoTable
def _mainLoop(self):
    """
    Main loop.

    First launch the worker and writer processes, then loop over
    ReferenceGroups in the cmp.h5.  For each contig:
      1. Load the sequence into the main memory of the parent process.
      2. Chunk up the contig and submit the chunk descriptions to the
         work queue.
    Finally, wait for the writer process to finish.

    Returns 0 on success.
    """
    # This looks scary but it's not.  Python uses reference
    # counting and has a secondary, optional garbage collector for
    # collecting garbage cycles.  Unfortunately when a cyclic GC
    # happens when a thread is calling cPickle.dumps, the
    # interpreter crashes sometimes.  See Bug 19704.  Since we
    # don't leak garbage cycles, disabling the cyclic GC is
    # essentially harmless.
    #gc.disable()
    self.loadSharedAlignmentSet(self.args.alignment_set)
    # Resolve the windows that will be visited.
    if self.args.referenceWindowsAsString is not None:
        self.referenceWindows = []
        for s in self.args.referenceWindowsAsString.split(","):
            try:
                win = ReferenceUtils.parseReferenceWindow(
                    s, self.alignments.referenceInfo)
                self.referenceWindows.append(win)
            # Narrowed from a bare `except:` so that KeyboardInterrupt /
            # SystemExit are not swallowed here.
            except Exception:
                if self.args.skipUnrecognizedContigs:
                    continue
                else:
                    # Call form instead of the deprecated Py2-only
                    # `raise Exception, "..."` statement syntax.
                    raise Exception("Unrecognized contig!")
    elif self.args.referenceWindowsFromAlignment:
        self.referenceWindows = ReferenceUtils.referenceWindowsFromAlignment(
            self.alignments, self.alignments.referenceInfo)
        refNames = set([rw.refName for rw in self.referenceWindows])
        # limit output to contigs that overlap with reference windows
        self.refInfo = [r for r in self.refInfo if r.Name in refNames]
    else:
        self.referenceWindows = ReferenceUtils.createReferenceWindows(
            self.refInfo)
    # Load reference and IpdModel
    self.loadReferenceAndModel(self.args.reference)
    # Spawn workers
    self._launchSlaveProcesses()
    logging.info('Generating kinetics summary for [%s]' %
                 self.args.alignment_set)
    #self.referenceMap = self.alignments['/RefGroup'].asDict('RefInfoID', 'ID')
    #self.alnInfo = self.alignments['/AlnInfo'].asRecArray()
    # Main loop -- we loop over ReferenceGroups in the cmp.h5.  For each
    # contig we will:
    #   1. Load the sequence into the main memory of the parent process
    #   2. Fork the workers
    #   3. chunk up the contig and submit work items
    self.workChunkCounter = 0
    # Iterate over references
    for window in self.referenceWindows:
        logging.info('Processing window/contig: %s' % (window,))
        for chunk in ReferenceUtils.enumerateChunks(
                self.args.referenceStride, window):
            self._workQueue.put((self.workChunkCounter, chunk))
            self.workChunkCounter += 1
    # Shutdown worker threads with None sentinels
    for i in xrange(self.args.numWorkers):
        self._workQueue.put(None)
    for w in self._workers:
        w.join()
    # Join on the result queue and the resultsCollector process.
    # This ensures all the results are written before shutdown.
    self.monitoringThread.join()
    self._resultsQueue.join()
    self._resultCollectorProcess.join()
    logging.info("ipdSummary.py finished. Exiting.")
    self.alignments.close()
    return 0
示例5: ToolRunner
# 需要导入模块: from pbcore.io import AlignmentSet [as 别名]
# 或者: from pbcore.io.AlignmentSet import close [as 别名]
#.........这里部分代码省略.........
chunks = reference.enumerateChunks(_id,
options.referenceChunkSize,
options.referenceWindows)
for chunk in chunks:
if self._aborting: return
self._workQueue.put(chunk)
# Write sentinels ("end-of-work-stream")
for i in xrange(options.numWorkers):
self._workQueue.put(None)
def _printProfiles(self):
    # Report the 20 most expensive entries (sorted by internal time)
    # from every profile dump found in the temporary directory.
    pattern = os.path.join(options.temporaryDirectory, "*")
    for profilePath in glob.glob(pattern):
        pstats.Stats(profilePath).sort_stats("time").print_stats(20)
def _cleanup(self):
    # Only profiling runs create a temporary directory to remove.
    if not options.doProfiling:
        return
    logging.info("Removing %s" % options.temporaryDirectory)
    shutil.rmtree(options.temporaryDirectory, ignore_errors=True)
@property
def aborting(self):
    # True once abortWork() has set the abort flag for this run.
    return self._aborting
def abortWork(self, why):
    """
    Shut down all the slave processes after a failure.

    Invoked by the monitoring thread when a child process exits with a
    non-zero status, or on a keyboard interrupt (Ctrl-C).  Never part
    of a normal shutdown.
    """
    logging.error(why)
    self._aborting = True
    # Closing both queues lets anything blocked on them wind down.
    for queue in (self._resultsQueue, self._workQueue):
        queue.close()
@property
def slaves(self):
    # The collection of slave workers spawned for this run.
    return self._slaves
def main(self):
# This looks scary but it's not. Python uses reference
# counting and has a secondary, optional garbage collector for
# collecting garbage cycles. Unfortunately when a cyclic GC
# happens when a thread is calling cPickle.dumps, the
# interpreter crashes sometimes. See Bug 19704. Since we
# don't leak garbage cycles, disabling the cyclic GC is
# essentially harmless.
gc.disable()
random.seed(42)
if options.pdb or options.pdbAtStartup:
print("Process ID: %d" % os.getpid(), file=sys.stderr)
try:
import ipdb
except ImportError:
die("Debugging options require 'ipdb' package installed.")
if not options.threaded:
die("Debugging only works with -T (threaded) mode")
if options.pdbAtStartup:
ipdb.set_trace()