

Python AsynchronousJobState.job_file method code examples

This article collects typical usage examples of the Python method galaxy.jobs.runners.AsynchronousJobState.job_file. If you are wondering what AsynchronousJobState.job_file does, how to call it, or what it looks like in real code, the curated examples below should help. You can also look further into usage examples of the containing class, galaxy.jobs.runners.AsynchronousJobState.


The sections below present 2 code examples of the AsynchronousJobState.job_file method, sorted by popularity by default.
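Before the full examples, here is a minimal sketch of the pattern both of them follow: an AsynchronousJobState is created and its job_file (together with the related output, error, and exit-code files) is pointed at a per-job path under the runner's cluster files directory. The directory and job id below are placeholder values, not taken from either example.

from galaxy.jobs.runners import AsynchronousJobState

# Placeholder values; a real runner takes these from app config and the Galaxy job.
cluster_files_directory = "/galaxy/database/pbs"
job_id = 42

job_state = AsynchronousJobState()
# All per-job files are keyed by the Galaxy job id inside the cluster files directory.
job_state.job_file = "%s/%s.sh" % (cluster_files_directory, job_id)        # submission script
job_state.output_file = "%s/%s.o" % (cluster_files_directory, job_id)      # captured stdout
job_state.error_file = "%s/%s.e" % (cluster_files_directory, job_id)       # captured stderr
job_state.exit_code_file = "%s/%s.ec" % (cluster_files_directory, job_id)  # recorded exit code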

Example 1: recover

# Required import: from galaxy.jobs.runners import AsynchronousJobState [as alias]
# Or: from galaxy.jobs.runners.AsynchronousJobState import job_file [as alias]
def recover(self, job, job_wrapper):
    """Recovers jobs stuck in the queued/running state when Galaxy started"""
    job_id = job.get_job_runner_external_id()
    pbs_job_state = AsynchronousJobState()
    pbs_job_state.output_file = "%s/%s.o" % (self.app.config.cluster_files_directory, job.id)
    pbs_job_state.error_file = "%s/%s.e" % (self.app.config.cluster_files_directory, job.id)
    pbs_job_state.exit_code_file = "%s/%s.ec" % (self.app.config.cluster_files_directory, job.id)
    pbs_job_state.job_file = "%s/%s.sh" % (self.app.config.cluster_files_directory, job.id)
    pbs_job_state.job_id = str(job_id)
    pbs_job_state.runner_url = job_wrapper.get_job_runner_url()
    pbs_job_state.job_destination = job_wrapper.job_destination
    job_wrapper.command_line = job.command_line
    pbs_job_state.job_wrapper = job_wrapper
    if job.state == model.Job.states.RUNNING:
        log.debug(
            "(%s/%s) is still in running state, adding to the PBS queue"
            % (job.id, job.get_job_runner_external_id())
        )
        pbs_job_state.old_state = "R"
        pbs_job_state.running = True
        self.monitor_queue.put(pbs_job_state)
    elif job.state == model.Job.states.QUEUED:
        log.debug(
            "(%s/%s) is still in PBS queued state, adding to the PBS queue"
            % (job.id, job.get_job_runner_external_id())
        )
        pbs_job_state.old_state = "Q"
        pbs_job_state.running = False
        self.monitor_queue.put(pbs_job_state)
Author: Pelonza, project: Learn2Mine-Main, lines of code: 31, source file: pbs.py
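A note on the design of recover: it does not resubmit anything. It rebuilds an AsynchronousJobState entirely from values that survive a Galaxy restart, namely the persisted job id and the configured cluster files directory, so job_file and the output, error, and exit-code paths are reconstructed by convention from job.id rather than read from storage. This only works because queue_job (Example 2 below) writes those files to exactly the same paths when the job is first submitted.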

Example 2: queue_job

# Required import: from galaxy.jobs.runners import AsynchronousJobState [as alias]
# Or: from galaxy.jobs.runners.AsynchronousJobState import job_file [as alias]
    def queue_job( self, job_wrapper ):
        """Create PBS script for a job and submit it to the PBS queue"""
        # prepare the job
        if not self.prepare_job( job_wrapper, include_metadata=not( self.app.config.pbs_stage_path ) ):
            return

        job_destination = job_wrapper.job_destination

        # Determine the job's PBS destination (server/queue) and options from the job destination definition
        pbs_queue_name = None
        pbs_server_name = self.default_pbs_server
        pbs_options = []
        if '-q' in job_destination.params and 'destination' not in job_destination.params:
            job_destination.params['destination'] = job_destination.params.pop('-q')
        if 'destination' in job_destination.params:
            if '@' in job_destination.params['destination']:
                # Destination includes a server
                pbs_queue_name, pbs_server_name = job_destination.params['destination'].split('@')
                if pbs_queue_name == '':
                    # e.g. `qsub -q @server`
                    pbs_queue_name = None
            else:
                # Destination is just a queue
                pbs_queue_name = job_destination.params['destination']
            job_destination.params.pop('destination')

        # Parse PBS params
        pbs_options = self.parse_destination_params(job_destination.params)

        # Explicitly set the determined PBS destination in the persisted job destination for recovery
        job_destination.params['destination'] = '%s@%s' % (pbs_queue_name or '', pbs_server_name)

        c = pbs.pbs_connect( util.smart_str( pbs_server_name ) )
        if c <= 0:
            errno, text = pbs.error()
            job_wrapper.fail( "Unable to queue job for execution.  Resubmitting the job may succeed." )
            log.error( "Connection to PBS server for submit failed: %s: %s" % ( errno, text ) )
            return

        # define job attributes
        ofile = "%s/%s.o" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
        efile = "%s/%s.e" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
        ecfile = "%s/%s.ec" % (self.app.config.cluster_files_directory, job_wrapper.job_id)

        output_fnames = job_wrapper.get_output_fnames()

        # If an application server is set, we're staging
        if self.app.config.pbs_application_server:
            pbs_ofile = self.app.config.pbs_application_server + ':' + ofile
            pbs_efile = self.app.config.pbs_application_server + ':' + efile
            output_files = [ str( o ) for o in output_fnames ]
            output_files.append(ecfile)
            stagein = self.get_stage_in_out( job_wrapper.get_input_fnames() + output_files, symlink=True )
            stageout = self.get_stage_in_out( output_files )
            attrs = [
                dict( name=pbs.ATTR_o, value=pbs_ofile ),
                dict( name=pbs.ATTR_e, value=pbs_efile ),
                dict( name=pbs.ATTR_stagein, value=stagein ),
                dict( name=pbs.ATTR_stageout, value=stageout ),
            ]
        # If not, we're using NFS
        else:
            attrs = [
                dict( name=pbs.ATTR_o, value=ofile ),
                dict( name=pbs.ATTR_e, value=efile ),
            ]

        # define PBS job options
        attrs.append( dict( name=pbs.ATTR_N, value=str( "%s_%s_%s" % ( job_wrapper.job_id, job_wrapper.tool.id, job_wrapper.user ) ) ) )
        job_attrs = pbs.new_attropl( len( attrs ) + len( pbs_options ) )
        for i, attr in enumerate( attrs + pbs_options ):
            job_attrs[i].name = attr['name']
            job_attrs[i].value = attr['value']
            if 'resource' in attr:
                job_attrs[i].resource = attr['resource']
        exec_dir = os.path.abspath( job_wrapper.working_directory )

        # write the job script
        if self.app.config.pbs_stage_path != '':
            # touch the ecfile so that it gets staged
            with open(ecfile, 'a'):
                os.utime(ecfile, None)

            stage_commands = pbs_symlink_template % (
                " ".join( job_wrapper.get_input_fnames() + output_files ),
                self.app.config.pbs_stage_path,
                exec_dir,
            )
        else:
            stage_commands = ''

        env_setup_commands = [ stage_commands ]
        script = self.get_job_file(job_wrapper, exit_code_path=ecfile, env_setup_commands=env_setup_commands)
        job_file = "%s/%s.sh" % (self.app.config.cluster_files_directory, job_wrapper.job_id)
        self.write_executable_script( job_file, script )
        # job was deleted while we were preparing it
        if job_wrapper.get_state() == model.Job.states.DELETED:
            log.debug( "Job %s deleted by user before it entered the PBS queue" % job_wrapper.job_id )
            pbs.pbs_disconnect(c)
            if job_wrapper.cleanup_job in ( "always", "onsuccess" ):
#......... the rest of this method is omitted here .........
Author: AAFC-MBB, project: galaxy-1, lines of code: 103, source file: pbs.py
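The excerpt above is cut off just before the lines that actually assign AsynchronousJobState.job_file. For orientation only, here is a hypothetical sketch of how a runner in this style could finish queue_job; it is not the omitted original code, and everything in it beyond the attributes already seen in Example 1 is an assumption.

        # Hypothetical continuation, not the omitted original code: after writing
        # the script, record the file paths on a new job state and hand the job
        # to the monitor thread, mirroring the recover() example above.
        pbs_job_state = AsynchronousJobState()
        pbs_job_state.job_wrapper = job_wrapper
        pbs_job_state.job_id = str(job_wrapper.job_id)    # replaced by the PBS id after submission
        pbs_job_state.job_file = job_file                  # the .sh script written above
        pbs_job_state.output_file = ofile
        pbs_job_state.error_file = efile
        pbs_job_state.exit_code_file = ecfile
        pbs_job_state.job_destination = job_destination
        pbs_job_state.running = False
        self.monitor_queue.put(pbs_job_state)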


Note: the galaxy.jobs.runners.AsynchronousJobState.job_file method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.