本文整理汇总了Python中galaxy.jobs.runners.AsynchronousJobState.old_state方法的典型用法代码示例。如果您正苦于以下问题:Python AsynchronousJobState.old_state方法的具体用法?Python AsynchronousJobState.old_state怎么用?Python AsynchronousJobState.old_state使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类galaxy.jobs.runners.AsynchronousJobState
的用法示例。
在下文中一共展示了AsynchronousJobState.old_state方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Resume monitoring of jobs left queued/running when Galaxy restarted."""
    # NOTE(review): 'job' appears to be a model.Job instance — confirm with callers.
    # The JSE-Drop handle for the job is the runner's external id.
    external_name = job.get_job_runner_external_id()
    destination = job_wrapper.job_destination
    # Locate the JSE-Drop drop-off directory
    drop_dir = self._get_drop_dir()
    log.debug("recover: drop-off dir = %s" % drop_dir)
    jse_drop = JSEDrop(drop_dir)
    # Rebuild the state object the monitor thread expects
    state = AsynchronousJobState()
    state.job_wrapper = job_wrapper
    state.job_id = external_name
    state.job_destination = destination
    # Translate the recovered Galaxy state into monitor flags
    if job.state == model.Job.states.RUNNING:
        state.old_state = True
        state.running = True
    elif job.get_state() == model.Job.states.QUEUED:
        state.old_state = True
        state.running = False
    # Hand the job back to the monitor thread (enqueued unconditionally,
    # matching the original behavior even when neither branch matched)
    self.monitor_queue.put(state)
示例2: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recovers jobs stuck in the queued/running state when Galaxy started"""
    external_id = job.get_job_runner_external_id()
    # Hoist the cluster files directory used by every per-job path below
    cluster_dir = self.app.config.cluster_files_directory
    state = AsynchronousJobState()
    # Reconstruct the per-job file paths for stdout/stderr/exit-code/script
    state.output_file = "%s/%s.o" % (cluster_dir, job.id)
    state.error_file = "%s/%s.e" % (cluster_dir, job.id)
    state.exit_code_file = "%s/%s.ec" % (cluster_dir, job.id)
    state.job_file = "%s/%s.sh" % (cluster_dir, job.id)
    state.job_id = str(external_id)
    state.runner_url = job_wrapper.get_job_runner_url()
    state.job_destination = job_wrapper.job_destination
    job_wrapper.command_line = job.command_line
    state.job_wrapper = job_wrapper
    if job.state == model.Job.states.RUNNING:
        log.debug(
            "(%s/%s) is still in running state, adding to the PBS queue"
            % (job.id, job.get_job_runner_external_id())
        )
        state.old_state = "R"  # PBS single-letter status for 'running'
        state.running = True
        self.monitor_queue.put(state)
    elif job.state == model.Job.states.QUEUED:
        log.debug(
            "(%s/%s) is still in PBS queued state, adding to the PBS queue"
            % (job.id, job.get_job_runner_external_id())
        )
        state.old_state = "Q"  # PBS single-letter status for 'queued'
        state.running = False
        self.monitor_queue.put(state)
示例3: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recovers jobs stuck in the queued/running state when Galaxy started.

    A job with no recorded external id is resubmitted from scratch;
    otherwise it is placed back on the runner monitor queue.
    """
    # TODO this needs to be implemented to override unimplemented base method
    job_id = job.get_job_runner_external_id()
    # BUG FIX: use lazy %s formatting — the original concatenated with '+',
    # which raised TypeError whenever job_id was None and masked the
    # resubmission path below.
    log.debug("k8s trying to recover job: %s", job_id)
    if job_id is None:
        # No external id recorded: submit the job from scratch
        self.put(job_wrapper)
        return
    ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper)
    ajs.job_id = str(job_id)
    ajs.command_line = job.command_line
    ajs.job_wrapper = job_wrapper
    ajs.job_destination = job_wrapper.job_destination
    if job.state == model.Job.states.RUNNING:
        log.debug("(%s/%s) is still in running state, adding to the runner monitor queue" % (
            job.id, job.job_runner_external_id))
        ajs.old_state = model.Job.states.RUNNING
        ajs.running = True
        self.monitor_queue.put(ajs)
    elif job.state == model.Job.states.QUEUED:
        log.debug("(%s/%s) is still in queued state, adding to the runner monitor queue" % (
            job.id, job.job_runner_external_id))
        ajs.old_state = model.Job.states.QUEUED
        ajs.running = False
        self.monitor_queue.put(ajs)
示例4: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recovers jobs stuck in the queued/running state when Galaxy started"""
    job_id = job.get_job_runner_external_id()
    if job_id is None:
        # Nothing to monitor — resubmit the job from scratch
        self.put(job_wrapper)
        return
    # Rebuild the async state object for the monitor thread
    drm_state = AsynchronousJobState(files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper)
    drm_state.job_id = str(job_id)
    drm_state.command_line = job.get_command_line()
    drm_state.job_wrapper = job_wrapper
    drm_state.job_destination = job_wrapper.job_destination
    if job.state == model.Job.states.RUNNING:
        log.debug("(%s/%s) is still in running state, adding to the DRM queue" % (job.get_id(), job.get_job_runner_external_id()))
        drm_state.old_state = drmaa.JobState.RUNNING
        drm_state.running = True
        self.monitor_queue.put(drm_state)
    elif job.get_state() == model.Job.states.QUEUED:
        log.debug("(%s/%s) is still in DRM queued state, adding to the DRM queue" % (job.get_id(), job.get_job_runner_external_id()))
        drm_state.old_state = drmaa.JobState.QUEUED_ACTIVE
        drm_state.running = False
        self.monitor_queue.put(drm_state)
示例5: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recover jobs stuck in the queued/running state when Galaxy started.

    Called by Galaxy at startup: jobs in RUNNING or QUEUED status are put
    back on monitor_queue by creating an AsynchronousJobState object.

    (Fix: the original had this second paragraph as a separate, no-op
    bare-string statement following the docstring; merged into a single
    real docstring.)
    """
    job_id = job_wrapper.job_id
    ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper)
    ajs.job_id = str(job_id)
    ajs.job_destination = job_wrapper.job_destination
    job_wrapper.command_line = job.command_line
    ajs.job_wrapper = job_wrapper
    if job.state == model.Job.states.RUNNING:
        log.debug("(%s/%s) is still in running state, adding to the god queue" % (job.id, job.get_job_runner_external_id()))
        ajs.old_state = 'R'
        ajs.running = True
        self.monitor_queue.put(ajs)
    elif job.state == model.Job.states.QUEUED:
        log.debug("(%s/%s) is still in god queued state, adding to the god queue" % (job.id, job.get_job_runner_external_id()))
        ajs.old_state = 'Q'
        ajs.running = False
        self.monitor_queue.put(ajs)
示例6: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recovers jobs stuck in the queued/running state when Galaxy started"""
    lwr_state = AsynchronousJobState()
    lwr_state.job_id = str(job.get_job_runner_external_id())
    lwr_state.runner_url = job_wrapper.get_job_runner_url()
    lwr_state.job_destination = job_wrapper.job_destination
    job_wrapper.command_line = job.get_command_line()
    lwr_state.job_wrapper = job_wrapper
    current = job.get_state()
    # Only RUNNING/QUEUED jobs are recoverable; anything else is dropped
    recoverable = (model.Job.states.RUNNING, model.Job.states.QUEUED)
    if current in recoverable:
        log.debug("(LWR/%s) is still in running state, adding to the LWR queue" % (job.get_id()))
        lwr_state.old_state = True
        lwr_state.running = current == model.Job.states.RUNNING
        self.monitor_queue.put(lwr_state)
示例7: recover
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def recover(self, job, job_wrapper):
    """Recover a job stuck in the queued/running state when Galaxy started."""
    # BUG FIX: the original template read '(name!r/runner!r)' — without
    # braces, str.format() ignored the name/runner keyword arguments and
    # logged that literal text.  '{name!r}/{runner!r}' interpolates them
    # as intended.
    msg = ('({name!r}/{runner!r}) is still in {state!s} state, adding to'
           ' the runner monitor queue')
    job_id = job.get_job_runner_external_id()
    ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory,
                               job_wrapper=job_wrapper)
    ajs.job_id = self.JOB_NAME_PREFIX + str(job_id)
    ajs.command_line = job.command_line
    ajs.job_wrapper = job_wrapper
    ajs.job_destination = job_wrapper.job_destination
    if job.state == model.Job.states.RUNNING:
        LOGGER.debug(msg.format(
            name=job.id, runner=job.job_runner_external_id,
            state='running'))
        ajs.old_state = model.Job.states.RUNNING
        ajs.running = True
        self.monitor_queue.put(ajs)
    elif job.state == model.Job.states.QUEUED:
        LOGGER.debug(msg.format(
            name=job.id, runner=job.job_runner_external_id,
            state='queued'))
        ajs.old_state = model.Job.states.QUEUED
        ajs.running = False
        self.monitor_queue.put(ajs)
示例8: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job(self, job_wrapper):
    """Prepare a job, submit it to a remote Pulsar server and monitor it.

    On any submission failure the job is marked failed via
    job_wrapper.fail() and nothing is queued.
    """
    job_destination = job_wrapper.job_destination
    self._populate_parameter_defaults(job_destination)
    command_line, client, remote_job_config, compute_environment = self.__prepare_job(job_wrapper, job_destination)
    # No command line means preparation produced nothing to submit
    if not command_line:
        return
    try:
        dependencies_description = PulsarJobRunner.__dependencies_description(client, job_wrapper)
        # Rewrite paths locally unless the client is configured to rewrite parameters itself
        rewrite_paths = not PulsarJobRunner.__rewrite_parameters(client)
        unstructured_path_rewrites = {}
        output_names = []
        if compute_environment:
            unstructured_path_rewrites = compute_environment.unstructured_path_rewrites
            output_names = compute_environment.output_names()
        client_job_description = ClientJobDescription(
            command_line=command_line,
            input_files=self.get_input_files(job_wrapper),
            client_outputs=self.__client_outputs(client, job_wrapper),
            working_directory=job_wrapper.tool_working_directory,
            metadata_directory=job_wrapper.working_directory,
            tool=job_wrapper.tool,
            config_files=job_wrapper.extra_filenames,
            dependencies_description=dependencies_description,
            env=client.env,
            rewrite_paths=rewrite_paths,
            arbitrary_files=unstructured_path_rewrites,
            touch_outputs=output_names,
        )
        job_id = pulsar_submit_job(client, client_job_description, remote_job_config)
        log.info("Pulsar job submitted with job_id %s" % job_id)
        job_wrapper.set_job_destination(job_destination, job_id)
        job_wrapper.change_state(model.Job.states.QUEUED)
    except Exception:
        job_wrapper.fail("failure running job", exception=True)
        log.exception("failure running job %d", job_wrapper.job_id)
        return
    # Track the submitted job until the monitor observes a state change
    pulsar_job_state = AsynchronousJobState()
    pulsar_job_state.job_wrapper = job_wrapper
    pulsar_job_state.job_id = job_id
    pulsar_job_state.old_state = True
    pulsar_job_state.running = False
    pulsar_job_state.job_destination = job_destination
    self.monitor_job(pulsar_job_state)
示例9: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job(self, job_wrapper):
    """Prepare the job locally, stage files to the LWR server and launch it.

    Any failure marks the job failed via job_wrapper.fail() and returns
    without queuing.  An empty command line finishes the job immediately.
    """
    command_line = ''
    job_destination = job_wrapper.job_destination
    try:
        job_wrapper.prepare()
        # Run any configured shell commands that stage the input files first
        if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None:
            for cmd in job_wrapper.prepare_input_files_cmds:  # run the commands to stage the input files
                #log.debug( 'executing: %s' % cmd )
                if 0 != os.system(cmd):
                    raise Exception('Error running file staging command: %s' % cmd)
            job_wrapper.prepare_input_files_cmds = None  # prevent them from being used in-line
        command_line = self.build_command_line( job_wrapper, include_metadata=False, include_work_dir_outputs=False )
    except:
        job_wrapper.fail( "failure preparing job", exception=True )
        log.exception("failure running job %d" % job_wrapper.job_id)
        return
    # If we were able to get a command line, run the job
    if not command_line:
        job_wrapper.finish( '', '' )
        return
    try:
        client = self.get_client_from_wrapper(job_wrapper)
        output_files = self.get_output_files(job_wrapper)
        input_files = job_wrapper.get_input_fnames()
        working_directory = job_wrapper.working_directory
        tool = job_wrapper.tool
        # FileStager rewrites the command line for the remote filesystem layout
        file_stager = FileStager(client, tool, command_line, job_wrapper.extra_filenames, input_files, output_files, working_directory)
        rebuilt_command_line = file_stager.get_rewritten_command_line()
        job_id = file_stager.job_id
        client.launch( rebuilt_command_line )
        job_wrapper.set_job_destination( job_destination, job_id )
        job_wrapper.change_state( model.Job.states.QUEUED )
    except:
        job_wrapper.fail( "failure running job", exception=True )
        log.exception("failure running job %d" % job_wrapper.job_id)
        return
    # Track the launched job on the monitor queue
    lwr_job_state = AsynchronousJobState()
    lwr_job_state.job_wrapper = job_wrapper
    lwr_job_state.job_id = job_id
    lwr_job_state.old_state = True
    lwr_job_state.running = False
    lwr_job_state.job_destination = job_destination
    self.monitor_job(lwr_job_state)
示例10: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job(self, job_wrapper):
    """Prepare a job, submit it to a remote LWR server and monitor it.

    On submission failure the job is marked failed and nothing is queued.
    """
    job_destination = job_wrapper.job_destination
    command_line, client, remote_job_config = self.__prepare_job( job_wrapper, job_destination )
    # Nothing to submit if preparation produced no command line
    if not command_line:
        return
    try:
        dependency_resolution = LwrJobRunner.__dependency_resolution( client )
        # Only ship tool requirements when dependencies resolve remotely
        remote_dependency_resolution = dependency_resolution == "remote"
        requirements = job_wrapper.tool.requirements if remote_dependency_resolution else []
        client_job_description = ClientJobDescription(
            command_line=command_line,
            output_files=self.get_output_files(job_wrapper),
            input_files=job_wrapper.get_input_fnames(),
            working_directory=job_wrapper.working_directory,
            tool=job_wrapper.tool,
            config_files=job_wrapper.extra_filenames,
            requirements=requirements,
            version_file=job_wrapper.get_version_string_path(),
        )
        job_id = lwr_submit_job(client, client_job_description, remote_job_config)
        log.info("lwr job submitted with job_id %s" % job_id)
        job_wrapper.set_job_destination( job_destination, job_id )
        job_wrapper.change_state( model.Job.states.QUEUED )
    except Exception:
        job_wrapper.fail( "failure running job", exception=True )
        log.exception("failure running job %d" % job_wrapper.job_id)
        return
    # Track the submitted job until the monitor observes a state change
    lwr_job_state = AsynchronousJobState()
    lwr_job_state.job_wrapper = job_wrapper
    lwr_job_state.job_id = job_id
    lwr_job_state.old_state = True
    lwr_job_state.running = False
    lwr_job_state.job_destination = job_destination
    self.monitor_job(lwr_job_state)
示例11: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
#.........这里部分代码省略.........
if job_wrapper.tool.old_id:
job_name += '_%s' % job_wrapper.tool.old_id
if self.external_runJob_script is None:
job_name += '_%s' % job_wrapper.user
job_name = ''.join( map( lambda x: x if x in ( string.letters + string.digits + '_' ) else '_', job_name ) )
ajs = AsynchronousJobState( files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper, job_name=job_name )
# set up the drmaa job template
jt = self.ds.createJobTemplate()
jt.remoteCommand = ajs.job_file
jt.jobName = ajs.job_name
jt.workingDirectory = job_wrapper.working_directory
jt.outputPath = ":%s" % ajs.output_file
jt.errorPath = ":%s" % ajs.error_file
# Avoid a jt.exitCodePath for now - it's only used when finishing.
native_spec = job_destination.params.get('nativeSpecification', None)
if native_spec is not None:
jt.nativeSpecification = native_spec
# fill in the DRM's job run template
script = self.get_job_file(job_wrapper, exit_code_path=ajs.exit_code_file)
try:
fh = file( ajs.job_file, "w" )
fh.write( script )
fh.close()
os.chmod( ajs.job_file, 0o755 )
except:
job_wrapper.fail( "failure preparing job script", exception=True )
log.exception( "(%s) failure writing job script" % galaxy_id_tag )
return
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "(%s) Job deleted by user before it entered the queue" % galaxy_id_tag )
if self.app.config.cleanup_job in ( "always", "onsuccess" ):
job_wrapper.cleanup()
return
log.debug( "(%s) submitting file %s", galaxy_id_tag, ajs.job_file )
if native_spec:
log.debug( "(%s) native specification is: %s", galaxy_id_tag, native_spec )
# runJob will raise if there's a submit problem
if self.external_runJob_script is None:
# TODO: create a queue for retrying submission indefinitely
# TODO: configurable max tries and sleep
trynum = 0
external_job_id = None
fail_msg = None
while external_job_id is None and trynum < 5:
try:
external_job_id = self.ds.runJob(jt)
break
except ( drmaa.InternalException, drmaa.DeniedByDrmException ) as e:
trynum += 1
log.warning( '(%s) drmaa.Session.runJob() failed, will retry: %s', galaxy_id_tag, e )
fail_msg = "Unable to run this job due to a cluster error, please retry it later"
time.sleep( 5 )
except:
log.exception( '(%s) drmaa.Session.runJob() failed unconditionally', galaxy_id_tag )
trynum = 5
else:
log.error( "(%s) All attempts to submit job failed" % galaxy_id_tag )
if not fail_msg:
fail_msg = DEFAULT_JOB_PUT_FAILURE_MESSAGE
job_wrapper.fail( fail_msg )
self.ds.deleteJobTemplate( jt )
return
else:
job_wrapper.change_ownership_for_run()
# if user credentials are not available, use galaxy credentials (if permitted)
allow_guests = asbool(job_wrapper.job_destination.params.get( "allow_guests", False) )
pwent = job_wrapper.user_system_pwent
if pwent is None:
if not allow_guests:
fail_msg = "User %s is not mapped to any real user, and not permitted to start jobs." % job_wrapper.user
job_wrapper.fail( fail_msg )
self.ds.deleteJobTemplate( jt )
return
pwent = job_wrapper.galaxy_system_pwent
log.debug( '(%s) submitting with credentials: %s [uid: %s]' % ( galaxy_id_tag, pwent[0], pwent[2] ) )
filename = self.store_jobtemplate(job_wrapper, jt)
self.userid = pwent[2]
external_job_id = self.external_runjob(filename, pwent[2]).strip()
log.info( "(%s) queued as %s" % ( galaxy_id_tag, external_job_id ) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_job_destination( job_destination, external_job_id )
# Store DRM related state information for job
ajs.job_id = external_job_id
ajs.old_state = 'new'
ajs.job_destination = job_destination
# delete the job template
self.ds.deleteJobTemplate( jt )
# Add to our 'queue' of jobs to monitor
self.monitor_queue.put( ajs )
示例12: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job( self, job_wrapper ):
    """Create job script and submit it to the DRM"""
    # NOTE: this block uses Python-2-only constructs (file(), the 0755
    # octal literal, string.letters) and predates the Python 3 port.
    # prepare the job
    if not self.prepare_job( job_wrapper, include_metadata=True ):
        return
    # command line has been added to the wrapper by prepare_job()
    command_line = job_wrapper.runner_command_line
    # get configured job destination
    job_destination = job_wrapper.job_destination
    # wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
    galaxy_id_tag = job_wrapper.get_id_tag()
    # define job attributes
    job_name = 'g%s' % galaxy_id_tag
    if job_wrapper.tool.old_id:
        job_name += '_%s' % job_wrapper.tool.old_id
    if self.external_runJob_script is None:
        job_name += '_%s' % job_wrapper.user
    # sanitize: replace anything outside letters/digits/underscore with '_'
    job_name = ''.join( map( lambda x: x if x in ( string.letters + string.digits + '_' ) else '_', job_name ) )
    ajs = AsynchronousJobState( files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper, job_name=job_name )
    # set up the drmaa job template
    jt = self.ds.createJobTemplate()
    jt.remoteCommand = ajs.job_file
    jt.jobName = ajs.job_name
    jt.outputPath = ":%s" % ajs.output_file
    jt.errorPath = ":%s" % ajs.error_file
    # Avoid a jt.exitCodePath for now - it's only used when finishing.
    native_spec = job_destination.params.get('nativeSpecification', None)
    if native_spec is not None:
        jt.nativeSpecification = native_spec
    # fill in the DRM's job run template
    script = drm_template % ( job_wrapper.galaxy_lib_dir,
                              job_wrapper.get_env_setup_clause(),
                              os.path.abspath( job_wrapper.working_directory ),
                              command_line,
                              ajs.exit_code_file )
    try:
        # Write the job script and make it executable
        fh = file( ajs.job_file, "w" )
        fh.write( script )
        fh.close()
        os.chmod( ajs.job_file, 0755 )
    except:
        job_wrapper.fail( "failure preparing job script", exception=True )
        log.exception( "(%s) failure writing job script" % galaxy_id_tag )
        return
    # job was deleted while we were preparing it
    if job_wrapper.get_state() == model.Job.states.DELETED:
        log.debug( "(%s) Job deleted by user before it entered the queue" % galaxy_id_tag )
        if self.app.config.cleanup_job in ( "always", "onsuccess" ):
            job_wrapper.cleanup()
        return
    log.debug( "(%s) submitting file %s" % ( galaxy_id_tag, ajs.job_file ) )
    log.debug( "(%s) command is: %s" % ( galaxy_id_tag, command_line ) )
    # runJob will raise if there's a submit problem
    if self.external_runJob_script is None:
        external_job_id = self.ds.runJob(jt)
    else:
        # Submit as the real user via the external run-job script
        job_wrapper.change_ownership_for_run()
        log.debug( '(%s) submitting with credentials: %s [uid: %s]' % ( galaxy_id_tag, job_wrapper.user_system_pwent[0], job_wrapper.user_system_pwent[2] ) )
        filename = self.store_jobtemplate(job_wrapper, jt)
        self.userid = job_wrapper.user_system_pwent[2]
        external_job_id = self.external_runjob(filename, job_wrapper.user_system_pwent[2]).strip()
    log.info( "(%s) queued as %s" % ( galaxy_id_tag, external_job_id ) )
    # store runner information for tracking if Galaxy restarts
    job_wrapper.set_job_destination( job_destination, external_job_id )
    # Store DRM related state information for job
    ajs.job_id = external_job_id
    ajs.old_state = 'new'
    ajs.job_destination = job_destination
    # delete the job template
    self.ds.deleteJobTemplate( jt )
    # Add to our 'queue' of jobs to monitor
    self.monitor_queue.put( ajs )
示例13: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
#.........这里部分代码省略.........
if self.app.config.pbs_application_server:
pbs_ofile = self.app.config.pbs_application_server + ':' + ofile
pbs_efile = self.app.config.pbs_application_server + ':' + efile
output_files = [ str( o ) for o in output_fnames ]
output_files.append(ecfile)
stagein = self.get_stage_in_out( job_wrapper.get_input_fnames() + output_files, symlink=True )
stageout = self.get_stage_in_out( output_files )
attrs = [
dict( name=pbs.ATTR_o, value=pbs_ofile ),
dict( name=pbs.ATTR_e, value=pbs_efile ),
dict( name=pbs.ATTR_stagein, value=stagein ),
dict( name=pbs.ATTR_stageout, value=stageout ),
]
# If not, we're using NFS
else:
attrs = [
dict( name=pbs.ATTR_o, value=ofile ),
dict( name=pbs.ATTR_e, value=efile ),
]
# define PBS job options
attrs.append( dict( name=pbs.ATTR_N, value=str( "%s_%s_%s" % ( job_wrapper.job_id, job_wrapper.tool.id, job_wrapper.user ) ) ) )
job_attrs = pbs.new_attropl( len( attrs ) + len( pbs_options ) )
for i, attr in enumerate( attrs + pbs_options ):
job_attrs[i].name = attr['name']
job_attrs[i].value = attr['value']
if 'resource' in attr:
job_attrs[i].resource = attr['resource']
exec_dir = os.path.abspath( job_wrapper.working_directory )
# write the job script
if self.app.config.pbs_stage_path != '':
# touch the ecfile so that it gets staged
with open(ecfile, 'a'):
os.utime(ecfile, None)
stage_commands = pbs_symlink_template % (
" ".join( job_wrapper.get_input_fnames() + output_files ),
self.app.config.pbs_stage_path,
exec_dir,
)
else:
stage_commands = ''
env_setup_commands = [ stage_commands ]
script = self.get_job_file(job_wrapper, exit_code_path=ecfile, env_setup_commands=env_setup_commands)
job_file = "%s/%s.sh" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
self.write_executable_script( job_file, script )
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the PBS queue" % job_wrapper.job_id )
pbs.pbs_disconnect(c)
if job_wrapper.cleanup_job in ( "always", "onsuccess" ):
self.cleanup( ( ofile, efile, ecfile, job_file ) )
job_wrapper.cleanup()
return
# submit
# The job tag includes the job and the task identifier
# (if a TaskWrapper was passed in):
galaxy_job_id = job_wrapper.get_id_tag()
log.debug("(%s) submitting file %s" % ( galaxy_job_id, job_file ) )
tries = 0
while tries < 5:
job_id = pbs.pbs_submit(c, job_attrs, job_file, pbs_queue_name, None)
tries += 1
if job_id:
pbs.pbs_disconnect(c)
break
errno, text = pbs.error()
log.warning( "(%s) pbs_submit failed (try %d/5), PBS error %d: %s" % (galaxy_job_id, tries, errno, text) )
time.sleep(2)
else:
log.error( "(%s) All attempts to submit job failed" % galaxy_job_id )
job_wrapper.fail( "Unable to run this job due to a cluster error, please retry it later" )
return
if pbs_queue_name is None:
log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
else:
log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, pbs_queue_name, job_id) )
# persist destination
job_wrapper.set_job_destination( job_destination, job_id )
# Store PBS related state information for job
job_state = AsynchronousJobState()
job_state.job_wrapper = job_wrapper
job_state.job_id = job_id
job_state.job_file = job_file
job_state.output_file = ofile
job_state.error_file = efile
job_state.exit_code_file = ecfile
job_state.old_state = 'N'
job_state.running = False
job_state.job_destination = job_destination
# Add to our 'queue' of jobs to monitor
self.monitor_queue.put( job_state )
示例14: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job(self, job_wrapper):
    """Create job script and submit it to the DRM via the CLI shell plugin.

    On failure (script write, submission, or missing job identifier) the
    job is marked failed and nothing is queued.
    """
    # prepare the job
    if not self.prepare_job(job_wrapper, include_metadata=True):
        return
    # Get shell and job execution interface
    job_destination = job_wrapper.job_destination
    shell_params, job_params = self.parse_destination_params(job_destination.params)
    shell, job_interface = self.get_cli_plugins(shell_params, job_params)
    # wrapper.get_id_tag() instead of job_id for compatibility with TaskWrappers.
    galaxy_id_tag = job_wrapper.get_id_tag()
    # define job attributes
    ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper)
    job_file_kwargs = job_interface.job_script_kwargs(ajs.output_file, ajs.error_file, ajs.job_name)
    script = self.get_job_file(
        job_wrapper,
        exit_code_path=ajs.exit_code_file,
        **job_file_kwargs
    )
    try:
        # BUG FIX: use a context manager so the descriptor is closed even
        # when write() raises — the original file()/write()/close()
        # sequence leaked the handle on error.  open() also replaces the
        # Python-2-only file() builtin.
        with open(ajs.job_file, "w") as fh:
            fh.write(script)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed.
        log.exception("(%s) failure writing job script" % galaxy_id_tag)
        job_wrapper.fail("failure preparing job script", exception=True)
        return
    # job was deleted while we were preparing it
    if job_wrapper.get_state() == model.Job.states.DELETED:
        log.info("(%s) Job deleted by user before it entered the queue" % galaxy_id_tag)
        if self.app.config.cleanup_job in ("always", "onsuccess"):
            job_wrapper.cleanup()
        return
    log.debug("(%s) submitting file: %s" % (galaxy_id_tag, ajs.job_file))
    cmd_out = shell.execute(job_interface.submit(ajs.job_file))
    if cmd_out.returncode != 0:
        log.error('(%s) submission failed (stdout): %s' % (galaxy_id_tag, cmd_out.stdout))
        log.error('(%s) submission failed (stderr): %s' % (galaxy_id_tag, cmd_out.stderr))
        job_wrapper.fail("failure submitting job")
        return
    # Some job runners return something like 'Submitted batch job XXXX'
    # Strip and split to get job ID.
    external_job_id = cmd_out.stdout.strip().split()[-1]
    if not external_job_id:
        log.error('(%s) submission did not return a job identifier, failing job' % galaxy_id_tag)
        job_wrapper.fail("failure submitting job")
        return
    log.info("(%s) queued with identifier: %s" % (galaxy_id_tag, external_job_id))
    # store runner information for tracking if Galaxy restarts
    job_wrapper.set_job_destination(job_destination, external_job_id)
    # Store state information for job
    ajs.job_id = external_job_id
    ajs.old_state = 'new'
    ajs.job_destination = job_destination
    # Add to our 'queue' of jobs to monitor
    self.monitor_queue.put(ajs)
示例15: queue_job
# 需要导入模块: from galaxy.jobs.runners import AsynchronousJobState [as 别名]
# 或者: from galaxy.jobs.runners.AsynchronousJobState import old_state [as 别名]
def queue_job(self, job_wrapper):
    """Write JSE-Drop file to drop location.

    Builds the job script, embeds qsub options, and writes a drop file
    that JSE-Drop picks up; the job name doubles as the external job id.
    """
    # Get the configured job destination
    job_destination = job_wrapper.job_destination
    # Get the parameters defined for this destination
    # i.e. location of the drop-off directory etc
    drop_off_dir = self._get_drop_dir()
    virtual_env = self._get_virtual_env()
    qsub_options = self._get_qsub_options(job_destination)
    galaxy_slots = self._get_galaxy_slots(job_destination)
    galaxy_id = self._get_galaxy_id()
    log.debug("queue_job: drop-off dir = %s" % drop_off_dir)
    log.debug("queue_job: virtual_env = %s" % virtual_env)
    log.debug("queue_job: qsub options = %s" % qsub_options)
    log.debug("queue_job: galaxy_slots = %s" % galaxy_slots)
    log.debug("queue_job: galaxy_id = %s" % galaxy_id)
    # ID and name for job.
    # BUG FIX: computed before the drop_off_dir check below — the
    # original referenced galaxy_id_tag/job_name in that branch's
    # logging before either was defined, so the error path itself
    # raised NameError.
    galaxy_id_tag = job_wrapper.get_id_tag()
    log.debug("ID tag: %s" % galaxy_id_tag)
    job_name = self._get_job_name(galaxy_id_tag,
                                  job_wrapper.tool.old_id,
                                  galaxy_id)
    log.debug("Job name: %s" % job_name)
    if drop_off_dir is None:
        # Can't locate drop-off dir
        job_wrapper.fail("failure preparing job script (no JSE-drop "
                         "directory defined)", exception=True)
        # BUG FIX: log.error rather than log.exception — there is no
        # active exception here, so log.exception appended a bogus
        # "NoneType: None" traceback line.
        log.error("(%s/%s) failure writing job script (no "
                  "JSE-drop directory defined)" %
                  (galaxy_id_tag, job_name))
        return
    # Initialise JSE-drop wrapper
    jse_drop = JSEDrop(drop_off_dir)
    # Prepare the job wrapper (or abort)
    if not self.prepare_job(job_wrapper):
        return
    # Sort out the slots (see e.g. condor.py for example)
    if galaxy_slots:
        galaxy_slots_statement = 'GALAXY_SLOTS="%s"; export GALAXY_SLOTS_CONFIGURED="1"' % galaxy_slots
    else:
        galaxy_slots_statement = 'GALAXY_SLOTS="1"'
    # Create script contents
    script = self.get_job_file(job_wrapper,
                               galaxy_virtual_env=virtual_env,
                               slots_statement=galaxy_slots_statement,
                               exit_code_path=None)
    # Separate leading shell specification from generated script
    shell = '\n'.join(filter(lambda x: x.startswith('#!'),
                             script.split('\n')))
    script = '\n'.join(filter(lambda x: not x.startswith('#!'),
                              script.split('\n')))
    # Create header with embedded qsub flags
    qsub_header = ["-V",
                   "-wd %s" % job_wrapper.working_directory]
    if qsub_options:
        qsub_header.append(qsub_options)
    qsub_header = '\n'.join(["#$ %s" % opt for opt in qsub_header])
    log.debug("qsub_header: %s" % qsub_header)
    # Reassemble the script components
    script = "\n".join((shell, qsub_header, script))
    # Create the drop file to submit the job
    try:
        drop_file = jse_drop.run(job_name, script)
        log.debug("created drop file %s" % drop_file)
        log.info("(%s) submitted as %s" % (galaxy_id_tag, job_name))
    except Exception:
        # Some problem writing the qsub file
        job_wrapper.fail("failure preparing job script",
                         exception=True)
        log.exception("(%s/%s) failure writing job script" %
                      (galaxy_id_tag, job_name))
        return
    # External job id (i.e. id used by JSE-Drop as a handle to
    # identify the job) is the same as the job name here
    external_job_id = job_name
    # Store runner information for tracking if Galaxy restarts
    job_wrapper.set_job_destination(job_destination,
                                    external_job_id)
    # Store state information for job
    job_state = AsynchronousJobState()
    job_state.job_wrapper = job_wrapper
    job_state.job_id = job_name
    job_state.old_state = True
    job_state.running = False
    job_state.job_destination = job_destination
    # Add to the queue of jobs to monitor
    self.monitor_job(job_state)
    log.info("%s: queued" % job_name)