This article collects typical usage examples of the Python method toil.common.Toil.getWorkflowDir. If you are wondering how Toil.getWorkflowDir is used in practice, the hand-picked examples below should help. You can also look further into usage examples of the containing class, toil.common.Toil.
Three code examples of Toil.getWorkflowDir are shown below, sorted by popularity by default.
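Before the examples, here is a minimal sketch of how the method is typically invoked. The workflowID and workDir values are hypothetical stand-ins for what a real Toil Config object would provide, and the described directory-creation behavior is an assumption about the Toil versions these examples come from:

from toil.common import Toil

# Hypothetical values; in real code these come from the workflow's Config object.
workflowID = "example-workflow-id"   # assumption: any unique workflow identifier
workDir = None                       # assumption: None falls back to Toil's default work directory

# Resolve the per-workflow directory used for temporary files (assumed to be
# created on demand if it does not already exist).
workflowDir = Toil.getWorkflowDir(workflowID, workDir)
print(workflowDir)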
Example 1: workerCleanup
# Required import: from toil.common import Toil [as alias]
# Or: from toil.common.Toil import getWorkflowDir [as alias]
def workerCleanup(info):
    """
    Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.

    :param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
           for cleaning up the worker.
    """
    assert isinstance(info, WorkerCleanupInfo)
    workflowDir = Toil.getWorkflowDir(info.workflowID, info.workDir)
    if (info.cleanWorkDir == 'always'
        or info.cleanWorkDir in ('onSuccess', 'onError') and os.listdir(workflowDir) == []):
        shutil.rmtree(workflowDir)
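A brief usage sketch for the function above, assuming it is in scope. The field names workflowID, workDir and cleanWorkDir are confirmed by the attribute accesses in the example, but the import path for WorkerCleanupInfo and the concrete values are assumptions:

from toil.common import Toil
# Assumption: WorkerCleanupInfo is the namedtuple used by Toil's batch systems;
# the import path may differ between Toil versions.
from toil.batchSystems.abstractBatchSystem import WorkerCleanupInfo

info = WorkerCleanupInfo(workDir=None,                      # assumption: None -> default work dir
                         workflowID="example-workflow-id",  # hypothetical identifier
                         cleanWorkDir='onSuccess')          # only remove the dir if it is empty
workerCleanup(info)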
Example 2: main
# Required import: from toil.common import Toil [as alias]
# Or: from toil.common.Toil import getWorkflowDir [as alias]
def main():
    logging.basicConfig()

    ##########################################
    # Import necessary modules
    ##########################################

    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        sys.path.append(sourcePath)

    # Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import makePublicDir
    from toil.lib.bioio import system
    from toil.job import Job

    ##########################################
    # Input args
    ##########################################

    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]

    ##########################################
    # Load the jobStore/config file
    ##########################################

    jobStore = Toil.loadOrCreateJobStore(jobStoreString)
    config = jobStore.config

    ##########################################
    # Create the worker killer, if requested
    ##########################################

    if config.badWorker > 0 and random.random() < config.badWorker:
        def badWorker():
            # This will randomly kill the worker process at a random time
            time.sleep(config.badWorkerFailInterval * random.random())
            os.kill(os.getpid(), signal.SIGKILL)  # signal.SIGINT)
            # TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
        t = Thread(target=badWorker)
        # Ideally this would be a daemon thread but that causes an intermittent (but benign)
        # exception similar to the one described here:
        # http://stackoverflow.com/questions/20596918/python-exception-in-thread-thread-1-most-likely-raised-during-interpreter-shutd
        # Our exception is:
        # Exception in thread Thread-1 (most likely raised during interpreter shutdown):
        # <type 'exceptions.AttributeError'>: 'NoneType' object has no attribute 'kill'
        # This attribute error is caused by the call os.kill() and apparently unavoidable with a
        # daemon
        t.start()

    ##########################################
    # Load the environment for the jobWrapper
    ##########################################

    # First load the environment for the jobWrapper.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.logLevel)

    toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)

    ##########################################
    # Setup the temporary directories.
    ##########################################

    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    os.chmod(localWorkerTempDir, 0755)

    ##########################################
    # Setup the logging
    ##########################################

    # This is mildly tricky because we don't just want to
    # redirect stdout and stderr for this Python process; we want to redirect it
    # for this process and all children. Consequently, we can't just replace
    # sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    # file descriptors. See <http://stackoverflow.com/a/11632982/402891>

    # When we start, standard input is file descriptor 0, standard output is
    # file descriptor 1, and standard error is file descriptor 2.

    # What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")

    # Save the original stdout and stderr (by opening new file descriptors to the
    # ......... part of the code is omitted here .........
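The example is cut off just as it begins redirecting the OS-level file descriptors. The omitted code is not reproduced here; instead, the following is a minimal standalone sketch of the dup2-based technique the comments describe, using only the standard library and a hypothetical log path:

import os
import sys

logPath = "/tmp/worker_log.txt"  # hypothetical path standing in for tempWorkerLogPath

# Flush Python-level buffers before touching the underlying descriptors.
sys.stdout.flush()
sys.stderr.flush()

# Save copies of the original stdout/stderr descriptors so they can be restored later.
origStdout = os.dup(1)
origStderr = os.dup(2)

# Open the log file and point FDs 1 and 2 at it; child processes inherit these
# descriptors, so their output is captured as well.
logFd = os.open(logPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
os.dup2(logFd, 1)
os.dup2(logFd, 2)

# ... run the job ...

# Restore the original descriptors afterwards.
os.dup2(origStdout, 1)
os.dup2(origStderr, 2)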
Example 3: physicalDisk
# Required import: from toil.common import Toil [as alias]
# Or: from toil.common.Toil import getWorkflowDir [as alias]
def physicalDisk(config, toilWorkflowDir=None):
    if toilWorkflowDir is None:
        from toil.common import Toil
        toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)
    diskStats = os.statvfs(toilWorkflowDir)
    return diskStats.f_frsize * diskStats.f_bavail
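Because the config argument is only consulted when toilWorkflowDir is not given, the function can be exercised against any existing directory. A short usage sketch, with a hypothetical directory to query:

import os

# Query free space for an explicit directory; config is ignored in this branch.
freeBytes = physicalDisk(None, toilWorkflowDir="/tmp")  # hypothetical directory
print("available: %.1f GiB" % (freeBytes / (1024.0 ** 3)))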