This article collects typical usage examples of the JobManager class in Python. Wondering what the JobManager class does, or how to use it? The curated class code examples below may help.
Shown below are 15 code examples of the JobManager class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: getJobInfo
def getJobInfo(jobId = None):
"""
Gets a job or all jobs information.
@since: 1.0
@type jobId: integer, or None
@param jobId: the job ID identifying the job whose status should be retrieved, or None for all jobs.
@throws Exception: in case of an internal error.
@rtype: a list of dict
{'id': integer, 'parent-id': integer, 'name': string,
'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
'running-time': float or None, 'result': integer or None, 'username': string,
'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
'type': string in ['ats', 'campaign'],
'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source)
}
@returns: a list of info for the given job, or for all jobs in the queue if jobId is None.
@throws Exception: when the job was not found, or when the job file was removed.
"""
getLogger().info(">> getJobInfo(%s)" % str(jobId))
res = []
try:
res = JobManager.instance().getJobInfo(jobId)
except Exception, e:
e = Exception("Unable to complete getJobInfo operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< getJobInfo(...): Fault:\n%s" % str(e))
raise(e)
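For illustration, a minimal sketch of how a caller might consume the result (the excerpt above is truncated before the function returns res; field names are taken from the docstring):

# Hypothetical caller: print a one-line summary for every job in the queue.
# Assumes getJobInfo() returns the list of dicts documented above.
for job in getJobInfo(None):
    print "job %(id)s (%(type)s) %(name)s: %(state)s" % job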
Example 2: getJobDetails
def getJobDetails(jobId):
"""
Gets a specific job's details.
@since: 1.8
@type jobId: integer
@param jobId: the job ID identifying the job whose status should be retrieved.
@throws Exception: in case of an internal error.
@rtype: dict
{'id': integer, 'parent-id': integer, 'name': string,
'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
'running-time': float or None, 'result': integer or None, 'username': string,
'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
'type': string in ['ats', 'campaign'],
'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source),
'te-filename': string or None,
'te-input-parameters': dict or None,
'te-command-line': string or None,
'source': base64-encoded string
}
@returns: a dict of info for the given job, or None if not found.
@throws Exception: when the job was not found, or when the job file was removed.
"""
getLogger().info(">> getJobDetails(%s)" % str(jobId))
res = None # per the docstring, the result is a dict, or None if not found
try:
res = JobManager.instance().getJobDetails(jobId)
except Exception, e:
e = Exception("Unable to complete getJobDetails operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< getJobDetails(...): Fault:\n%s" % str(e))
raise(e)
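As a client-side sketch, the base64-encoded 'source' field documented above can be decoded as follows (the excerpt truncates the return statement; the job id 42 is an arbitrary placeholder):

import base64

# Recover a job's source code from the base64-encoded 'source' field
# described in the getJobDetails() docstring.
details = getJobDetails(42)
if details is not None:
    source = base64.decodestring(details['source'])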
Example 3: __init__
def __init__(self, sConfigFile):
self.sConfigFile = sConfigFile
self.dbConnection = ""
self.logger = None
self.initLogging("JobOrganizer")
self.logger.info("========================================")
self.logger.info("JobOrganizer in init")
# check if initfile is valid
if os.path.exists(self.sConfigFile):
self.logger.info("init file " + self.sConfigFile + " was found")
self.configModule = __import__(os.path.splitext(self.sConfigFile)[0]) # strip(".py") would remove characters, not the suffix
#self.createRunlisteners()
#self.initRunListeners()
self.initDbParameters()
self.readJobsConfiguration()
self.jobManager = JobManager(self.sConfigFile)
Tools.createFolder('tmp/jobConfig')
else:
self.logger.error("init file "+ self.sConfigFile + " was not found !")
sys.exit(1) # exit with a non-zero status on a fatal configuration error
self.logger.info("JobOrganizer initialized")
self.logger.info("========================================")
Example 4: getJobLog
def getJobLog(jobId, useCompression = True):
"""
Gets the current log for an existing job.
@since: 1.0
@type jobId: integer
@param jobId: the job ID identifying the job whose log should be retrieved
@type useCompression: bool
@param useCompression: if set to True, compress the log using zlib before encoding the response in base64
@rtype: string
@returns: the job's log as utf-8 encoded XML,
optionally zlib-compressed and base64-encoded if useCompression is set to True
"""
getLogger().info(">> getJobLog(%d, %s)" % (jobId, str(useCompression)))
res = None
try:
log = JobManager.instance().getJobLog(jobId)
if log is not None:
if useCompression:
res = base64.encodestring(zlib.compress(log))
else:
res = base64.encodestring(log)
except Exception, e:
e = Exception("Unable to complete getJobLog operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< getJobLog(...): Fault:\n%s" % str(e))
raise(e)
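Client-side decoding is the mirror image of the encoding above; a minimal sketch (job id 42 is an arbitrary placeholder):

import base64
import zlib

# Reverse the base64 encoding, then the zlib compression.
encoded = getJobLog(42, useCompression = True)
xml_log = zlib.decompress(base64.decodestring(encoded))

# Without compression, only the base64 step is needed.
xml_log = base64.decodestring(getJobLog(42, useCompression = False))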
Example 5: run
def run(self, name, datafiles, goldnet_file):
import numpy
os.chdir(os.environ["gene_path"])
datastore = ReadData(datafiles[0], "steadystate")
for file in datafiles[1:]:
datastore.combine(ReadData(file, "steadystate"))
datastore.normalize()
settings = {}
settings = ReadConfig(settings)
# TODO: CHANGE ME
settings["global"]["working_dir"] = os.getcwd() + '/'
# Setup job manager
print "Starting new job manager"
jobman = JobManager(settings)
# Make GENIE3 jobs
genie3 = GENIE3()
genie3.setup(datastore, settings, name)
print "Queuing job..."
jobman.queueJob(genie3)
print jobman.queue
print "Running queue..."
jobman.runQueue()
jobman.waitToClear()
print "Queue finished"
job = jobman.finished[0]
print job.alg.gene_list
print job.alg.read_output(settings)
jobnet = job.alg.network
print "PREDICTED NETWORK:"
print job.alg.network.network
print jobnet.original_network
if goldnet_file != None:
goldnet = Network()
goldnet.read_goldstd(goldnet_file)
print "GOLD NETWORK:"
print goldnet.network
print jobnet.analyzeMotifs(goldnet).ToString()
print jobnet.calculateAccuracy(goldnet)
return jobnet.original_network
Example 6: run
def run(self, ts_file, name=None, delta_t=30):
os.chdir(os.environ["gene_path"])
print "Reading in knockout data"
timeseries_storage = ReadData(ts_file, "timeseries")
settings = {}
settings = ReadConfig(settings)
# TODO: CHANGE ME
settings["global"]["working_dir"] = os.getcwd() + "/"
# Setup job manager
print "Starting new job manager"
jobman = JobManager(settings)
# Make Banjo jobs
banjojob = Banjo()
if delta_t != None:
settings["global"]["time_series_delta_t"] = int(delta_t)
else:
settings["global"]["time_series_delta_t"] = 30
if name != None:
banjojob.setup(timeseries_storage, settings, name)
else:
banjojob.setup(timeseries_storage, settings)
print "Queuing job..."
jobman.queueJob(banjojob)
print jobman.queue
print "Running queue..."
jobman.runQueue()
jobman.waitToClear()
print "Queue finished"
job = jobman.finished[0]
print job.alg.gene_list
print job.alg.read_output(settings)
jobnet = job.alg.network
print "PREDICTED NETWORK:"
# print job.alg.network.network
# print jobnet.original_network
return jobnet.original_network
Example 7: persistJobQueue
def persistJobQueue():
"""
Persists the current job queue to the standard persistence file.
This administrative function may be convenient when you're about
to kill the server violently.
@since: 1.5
@throws Exception: in case of an error
@rtype: None
"""
getLogger().info(">> persistJobQueue()")
try:
res = JobManager.instance().persist()
except Exception, e:
e = Exception("Unable to complete persistJobQueue operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< persistJobQueue(...): Fault:\n%s" % str(e))
raise(e)
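A trivial usage sketch, as suggested by the docstring (called by an administrative client just before forcing the server down):

# Snapshot the current queue so scheduled jobs can be restored on restart.
persistJobQueue()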
Example 8: getJobLogFilename
def getJobLogFilename(jobId):
"""
Gets an existing job's log filename.
@since: 1.0
@type jobId: integer
@param jobId: the job ID identifying the job whose log filename should be retrieved
@rtype: string, or None
@returns: the log filename relative to the docroot,
or None if the job was not found
"""
getLogger().info(">> getJobLogFilename(%d)" % jobId)
res = None
try:
res = JobManager.instance().getJobLogFilename(jobId)
except Exception, e:
e = Exception("Unable to complete getJobLogFilename operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< getJobLogFilename(...): Fault:\n%s" % str(e))
raise(e)
Example 9: sendSignal
def sendSignal(jobId, signal):
"""
Sends a signal to the job id'd by jobId.
@since: 1.0
@type jobId: integer
@param jobId: the job Id
@type signal: string
@param signal: the signal to send to the job
@throws Exception: in case of an internal error.
@rtype: bool
@returns: True if successfully sent, or False if the job was not found.
"""
getLogger().info(">> sendSignal(%d, %s)" % (jobId, signal))
ret = False
try:
ret = JobManager.instance().sendSignal(jobId, signal)
except Exception, e:
e = Exception("Unable to perform operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< sendSignal(...): Fault:\n%s" % str(e))
raise(e)
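A usage sketch; note that 'cancel' is an assumed signal name used for illustration only, since the set of valid signal strings is defined by the server, not by this interface:

# Hypothetical: ask job 42 to cancel itself.
if sendSignal(42, 'cancel'):
    print "signal sent"
else:
    print "job not found"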
Example 10: rescheduleJob
def rescheduleJob(jobId, at):
"""
Reschedules a job to start at <at>.
@since: 1.2
@type jobId: integer
@param jobId: the jobId identifying the job that needs rescheduling
@type at: float
@param at: the timestamp of the new scheduled start
@throws Exception: in case of an internal error.
@rtype: bool
@returns: True if the rescheduling was OK, False otherwise (job already started)
"""
getLogger().info(">> rescheduleJob(%s)" % str(jobId))
res = False
try:
res = JobManager.instance().rescheduleJob(jobId, at)
except Exception, e:
e = Exception("Unable to complete rescheduleJob operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< rescheduleJob(...): Fault:\n%s" % str(e))
raise(e)
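Since at is a plain epoch timestamp, rescheduling a job to start one hour from now might look like this sketch:

import time

# Push job 42 back to one hour from now; returns False if the job
# has already started.
ok = rescheduleJob(42, time.time() + 3600)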
Example 11: purgeJobQueue
def purgeJobQueue(older_than):
"""
Purges jobs in the queue that:
- are completed (any status)
- and whose completion time is strictly older than the provided older_than timestamp (UTC)
@since: 1.5
@type older_than: float (timestamp)
@param older_than: the epoch timestamp of the oldest completed job to keep
@throws Exception: in case of an error
@rtype: int
@returns: the number of purged jobs
"""
getLogger().info(">> purgeJobQueue(%s)" % older_than)
res = 0
try:
res = JobManager.instance().purgeJobs(older_than)
except Exception, e:
e = Exception("Unable to complete purgeJobs operation: %s\n%s" % (str(e), Tools.getBacktrace()))
getLogger().info("<< purgeJobQueue(...): Fault:\n%s" % str(e))
raise(e)
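For example, purging completed jobs that finished more than seven days ago could be sketched as:

import time

# Purge completed jobs whose completion time is older than 7 days.
purged = purgeJobQueue(time.time() - 7 * 86400)
print "%d job(s) purged" % purged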
Example 12: ReadData
ko_file = settings["global"]["dream4100_network_knockout_file"].split()
kd_file = settings["global"]["dream4100_network_knockdown_file"].split()
ts_file = settings["global"]["dream4100_network_timeseries_file"].split()
wt_file = settings["global"]["dream4100_network_wildtype_file"].split()
# Read data into program
# Where the format is "FILENAME" "DATATYPE"
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
timeseries_storage = ReadData(ts_file[0], "timeseries")
wildtype_storage = ReadData(wt_file[0], "wildtype")
# Setup job manager
jobman = JobManager(settings)
# Make NIR jobs
min_restk = max(len(knockout_storage.gene_list) / 5, 3)
max_restk = len(knockout_storage.gene_list) / 2 + 1
rest_list = list(set([3,5,20,21] + [i for i in range(min_restk, max_restk)]))
rest_list = [3,5,10,15,12,20,21] # NOTE: this hard-coded list overrides the computed one above
for i in rest_list:
nirjob = NIR()
nirjob.setup(knockout_storage, settings, "NIR_K="+str(i), 5, i)
jobman.queueJob(nirjob)
print jobman.queue
jobman.runQueue()
jobman.waitToClear()
Example 13: run
def run(self):
interrupted = False
corrupted_job_regex = re.compile(r"Output files for job (\d+) seems corrupted")
email = Config.get().get()["email"]
try:
while True:
if not Utils.is_proxy_valid():
Utils.delegate_proxy(self.verbose)
self.status()
get_id = []
kill_id = []
submit_id = []
resubmit_id = []
force_resubmit_id = []
corrupted_id = []
n_waiting = 0
n_running = 0
for (id, job) in self.jobs.items():
status = JobManager.create(job)
if status.gettable():
get_id.append(str(id))
if status.killable():
kill_id.append(str(id))
if status.submittable():
submit_id.append(str(id))
if status.failed():
resubmit_id.append(str(id))
if status.running():
n_running = n_running + 1
if status.waiting():
n_waiting = n_waiting + 1
job._status = status
if self.verbose:
if len(get_id) > 0:
print("I'll get jobs " + ",".join(get_id))
else:
print("No job to get output for")
if len(kill_id) > 0:
print("I'll kill jobs " + ",".join(kill_id))
else:
print("No job to kill")
if len(resubmit_id) > 0:
print("I'll resubmit jobs " + ",".join(resubmit_id))
else:
print("No job to resubmit")
print("")
self.dump()
log = ""
if not self.dry_run:
if len(get_id) > 0:
if self.verbose:
print("Retrieving jobs...")
(output, returncode) = Utils.runCrab("get", ",".join(get_id), self.folder)
log += "crab -get output:\n"
log += "".join(output)
log += "\n"
# Detect corrupted jobs
lines = output
for line in lines:
matches = re.search(corrupted_job_regex, line)
if matches is not None:
corrupted_id.append(str(matches.group(1)))
if len(corrupted_id) > 0:
if self.verbose:
print("Some jobs are corrupted: " + ",".join(corrupted_id))
kill_id.extend(corrupted_id)
kill_id.sort()
force_resubmit_id.extend(corrupted_id)
force_resubmit_id.sort()
if len(kill_id) > 0:
if self.verbose:
print("Killing jobs...")
(output, returncode) = Utils.runCrab("kill", ",".join(kill_id), self.folder)
log += "crab -kill output:\n"
log += "".join(output)
log += "\n"
if len(submit_id) > 0:
# Crab only accepts a maximum of 500 jobs per submit.
splitted_submit_ids = chunks(submit_id, 500)
#......... (rest of the code omitted) .........
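The chunks helper called above is not defined in this excerpt; a minimal sketch of what it presumably does (splitting a list into fixed-size slices) follows. The real implementation may differ:

def chunks(items, size):
    # Yield successive slices of at most `size` elements from `items`.
    for i in range(0, len(items), size):
        yield items[i:i + size]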
Example 14: get_example_data_files
return cache[name]
# Gather networks
ko_file, kd_file, ts_file, wt_file, mf_file, goldnet = get_example_data_files(sys.argv[1], settings)
# Read data into program
# Where the format is "FILENAME" "DATATYPE"
mf_storage = ReadData(mf_file[0], "multifactorial")
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
wildtype_storage = ReadData(wt_file[0], "wildtype")
timeseries_storage = ReadData(ts_file[0], "timeseries")
gene_list = knockout_storage.gene_list
votejob = MCZ()
votejob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "SimAnnealing")
jobman = JobManager(settings)
jobman.queueJob(votejob)
votejob = jobman.queue[0]
jobman.runQueue()
jobman.waitToClear("VotingJob")
# Send to voting algorithm
dream410 = ["dream410","dream410_2","dream410_3","dream410_4","dream410_5"]
#dream410 = ["dream410","dream410_2"]
dream4100 = ["dream4100","dream4100_2","dream4100_3","dream4100_4","dream4100_5"]
if sys.argv[1] == "dream410":
networks = dream410
elif sys.argv[1] == "dream4100":
networks = dream4100
else:
networks = [sys.argv[1]]
Example 15: ReadConfig
# Set up output directory
t = datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
settings["global"]["output_dir"] = (
settings["global"]["output_dir"] + "/" + settings["global"]["experiment_name"] + "-" + t + "/"
)
os.mkdir(settings["global"]["output_dir"])
# Read in configs for this algorithm
from dfg4grn import *
settings = ReadConfig(settings, "./config/default_values/dfg4grn.cfg")
settings = ReadConfig(settings, settings["dfg4grn"]["config"])
grid = Generate_Grid("dfg4grn", None, settings, ["eta_z", "lambda_w", "tau"], 5).test_list
jobman = JobManager(settings)
dfg = DFG4GRN()
settings["dfg4grn"]["eta_z"] = 0.1
settings["dfg4grn"]["lambda_w"] = 0.01
settings["dfg4grn"]["tau"] = 3.5
dfg.setup(
timeseries_storage,
TFList(timeseries_storage[0].gene_list),
settings,
"EtaZ-{0}_LamdaW-{1}_Tau-{2}".format(0.1, 0.01, 3.5),
20,
)
jobman.queueJob(dfg)
dfg = DFG4GRN()