

Python sleeper.Sleeper Class Code Examples

This article collects typical usage examples of the Python class galaxy.util.sleeper.Sleeper. If you have been wondering what the Sleeper class is for, how to use it, or what real-world code that uses it looks like, the hand-picked examples below should help.


Fifteen code examples of the Sleeper class are shown below, ordered by popularity by default.
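All fifteen examples follow the same pattern: a background thread loops, calling sleeper.sleep(seconds) between iterations, and a shutdown path calls sleeper.wake() so the thread exits promptly instead of waiting out the full interval. For orientation, here is a minimal sketch of such an interruptable-sleep helper built on threading.Condition; it matches the sleep()/wake() interface the examples rely on, but it is an illustration rather than Galaxy's exact source.

    import threading

    class Sleeper(object):
        """Sleep for a number of seconds, unless woken early from another thread."""

        def __init__(self):
            self.condition = threading.Condition()

        def sleep(self, seconds):
            # wait() returns early if another thread calls notify() via wake()
            with self.condition:
                self.condition.wait(seconds)

        def wake(self):
            # Wake any thread currently blocked in sleep()
            with self.condition:
                self.condition.notify()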

Example 1: _initialize

    def _initialize(self):
        if boto is None:
            raise Exception(NO_BOTO_ERROR_MESSAGE)

        # for multipart upload
        self.s3server = {'access_key': self.access_key,
                         'secret_key': self.secret_key,
                         'is_secure': self.is_secure,
                         'max_chunk_size': self.max_chunk_size,
                         'host': self.host,
                         'port': self.port,
                         'use_rr': self.use_rr,
                         'conn_path': self.conn_path}

        self._configure_connection()
        self.bucket = self._get_bucket(self.bucket)
        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
        # Test if 'axel' is available for parallel download and pull the key into cache
        if which('axel'):
            self.use_axel = True
        else:
            self.use_axel = False
Author: msauria, Project: galaxy, Lines: 30, Source: s3.py
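The __cache_monitor target is not included in this excerpt. Judging from the filesystem monitor shown in Example 12, the loop it runs presumably looks like the hypothetical sketch below: measure cache usage, evict when over the limit, then sleep interruptably so shutdown can wake the thread early. The self.running flag follows the convention of Examples 3 and 13, and the _cache_usage/_clean_cache helpers are assumed names for illustration, not Galaxy's.

    def __cache_monitor(self):
        # Hypothetical sketch of the monitor loop started above
        while self.running:
            used = self._cache_usage()   # assumed helper: bytes currently cached
            if used > self.cache_size:
                self._clean_cache()      # assumed helper: evict least-recently-used files
            self.sleeper.sleep(30)       # interruptable: a shutdown can call wake()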

Example 2: __init__

    def __init__(self, config, config_xml=None, fsmon=False):
        """
        Create a new DistributedObjectStore.

        config -- Python object with any number of attributes. Most likely
            populated from `galaxy/config.ini`. The DistributedObjectStore
            specific attributes are:
                distributed_object_store_config_file
        config_xml -- An ElementTree object
        fsmon -- boolean. If true, monitor the file system for free space,
            removing backends when they get too full.
        """
        super(DistributedObjectStore, self).__init__(config,
                config_xml=config_xml)
        if config_xml is None:
            self.distributed_config = config.distributed_object_store_config_file
            assert self.distributed_config is not None, \
                "distributed object store ('object_store = distributed') " \
                "requires a config file, please set one in " \
                "'distributed_object_store_config_file')"
        self.backends = {}
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = 0.0
        random.seed()
        self.__parse_distributed_config(config, config_xml)
        self.sleeper = None
        if fsmon and ( self.global_max_percent_full or filter( lambda x: x != 0.0, self.max_percent_full.values() ) ):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.setDaemon( True )
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")
Author: BenjaminHCCarr, Project: galaxy, Lines: 34, Source: __init__.py

Example 3: __init__

    def __init__( self, app, dispatcher ):
        """Start the job manager"""
        self.app = app
        self.dispatcher = dispatcher

        self.sa_session = app.model.context
        self.track_jobs_in_database = self.app.config.track_jobs_in_database

        # Initialize structures for handling job limits
        self.__clear_user_job_count()

        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        self.waiting_jobs = []
        # Contains wrappers of jobs that are limited or ready (so they aren't created unnecessarily/multiple times)
        self.job_wrappers = {}
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.monitor_thread = threading.Thread( name="JobHandlerQueue.monitor_thread", target=self.__monitor )
        self.monitor_thread.setDaemon( True )
Author: jj-umn, Project: galaxy-central, Lines: 25, Source: handler.py

Example 4: __init__

    def __init__(self, config, config_xml):
        if boto is None:
            raise Exception(NO_BOTO_ERROR_MESSAGE)
        super(S3ObjectStore, self).__init__(config, config_xml)
        self.config = config
        self.staging_path = self.config.file_path
        self.transfer_progress = 0
        self._parse_config_xml(config_xml)
        self._configure_connection()
        self.bucket = self._get_bucket(self.bucket)
        # Clean cache only if value is set in universe_wsgi.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
        # Test if 'axel' is available for parallel download and pull the key into cache
        try:
            subprocess.call('axel')
            self.use_axel = True
        except OSError:
            self.use_axel = False
Author: emrobe, Project: META-pipe, Lines: 25, Source: s3.py

Example 5: __init__

    def __init__(self, app):
        self.app = app
        self.sa_session = app.model.context.current
        self.queue = Queue()
        self.plugins = {}
        self._load_plugins()
        self.sleeper = Sleeper()
        self.running = True
        self.waiting_jobs = []
        self.__check_jobs_at_startup()
        self.monitor_thread = threading.Thread(target=self.__monitor)
        self.monitor_thread.start()
        log.info('Deferred job queue started')
Author: bwlang, Project: galaxy, Lines: 13, Source: __init__.py

Example 6: __init__

    def __init__(self, config, config_dict, fsmon=False):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the same attributes needed by
            :class:`NestedObjectStore` plus:

            * distributed_object_store_config_file

        :type config_xml: ElementTree

        :type fsmon: bool
        :param fsmon: If True, monitor the file system for free space,
            removing backends when they get too full.
        """
        super(DistributedObjectStore, self).__init__(config)

        self.backends = {}
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = config_dict.get("global_max_percent_full", 0)
        random.seed()

        backends_def = config_dict["backends"]
        for backend_def in backends_def:
            backend_id = backend_def["id"]
            file_path = backend_def["files_dir"]
            extra_dirs = backend_def.get("extra_dirs", [])
            maxpctfull = backend_def.get("max_percent_full", 0)
            weight = backend_def["weight"]
            disk_config_dict = dict(files_dir=file_path, extra_dirs=extra_dirs)
            self.backends[backend_id] = DiskObjectStore(config, disk_config_dict)
            self.max_percent_full[backend_id] = maxpctfull
            log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (backend_id, weight, file_path))

            for i in range(0, weight):
                # The simplest way to do weighting: add backend ids to a
                # sequence the number of times equalling weight, then randomly
                # choose a backend from that sequence at creation
                self.weighted_backend_ids.append(backend_id)
        self.original_weighted_backend_ids = self.weighted_backend_ids

        self.sleeper = None
        if fsmon and (self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0]):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.setDaemon(True)
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")
Author: msauria, Project: galaxy, Lines: 50, Source: __init__.py

Example 7: _initialize

    def _initialize(self):
        if BlockBlobService is None:
            raise Exception(NO_BLOBSERVICE_ERROR_MESSAGE)

        self._configure_connection()

        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
Author: msauria, Project: galaxy, Lines: 15, Source: azure_blob.py

Example 8: __init__

    def __init__(self, config, config_xml):
        if BlockBlobService is None:
            raise Exception(NO_BLOBSERVICE_ERROR_MESSAGE)
        super(AzureBlobObjectStore, self).__init__(config)

        self.staging_path = self.config.file_path
        self.transfer_progress = 0
        self._parse_config_xml(config_xml)
        self._configure_connection()

        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
Author: bwlang, Project: galaxy, Lines: 19, Source: azure_blob.py

Example 9: __init__

    def __init__(self, config, config_xml=None, fsmon=False):
        super(DistributedObjectStore, self).__init__(config, config_xml=config_xml)
        if config_xml is None:
            self.distributed_config = config.distributed_object_store_config_file
            assert self.distributed_config is not None, "distributed object store ('object_store = distributed') " \
                                                        "requires a config file, please set one in " \
                                                        "'distributed_object_store_config_file')"
        self.backends = {}
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = 0.0
        random.seed()
        self.__parse_distributed_config(config, config_xml)
        self.sleeper = None
        if fsmon and ( self.global_max_percent_full or filter( lambda x: x != 0.0, self.max_percent_full.values() ) ):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.setDaemon( True )
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")
Author: HullUni-bioinformatics, Project: ReproPhyloGalaxy, Lines: 21, Source: __init__.py

Example 10: _initialize

    def _initialize(self):
        if CloudProviderFactory is None:
            raise Exception(NO_CLOUDBRIDGE_ERROR_MESSAGE)

        self._configure_connection()
        self.bucket = self._get_bucket(self.bucket)
        # Clean cache only if value is set in galaxy.ini
        if self.cache_size != -1:
            # Convert GBs to bytes for comparison
            self.cache_size = self.cache_size * 1073741824
            # Helper for interruptable sleep
            self.sleeper = Sleeper()
            self.cache_monitor_thread = threading.Thread(target=self.__cache_monitor)
            self.cache_monitor_thread.start()
            log.info("Cache cleaner manager started")
        # Test if 'axel' is available for parallel download and pull the key into cache
        try:
            subprocess.call('axel')
            self.use_axel = True
        except OSError:
            self.use_axel = False
Author: msauria, Project: galaxy, Lines: 21, Source: cloud.py

Example 11: __init__

    def __init__(self, config, config_xml=None, fsmon=False):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the same attributes needed by
            :class:`NestedObjectStore` plus:

            * distributed_object_store_config_file

        :type config_xml: ElementTree

        :type fsmon: bool
        :param fsmon: If True, monitor the file system for free space,
            removing backends when they get too full.
        """
        super(DistributedObjectStore, self).__init__(config,
                config_xml=config_xml)
        if config_xml is None:
            self.distributed_config = config.distributed_object_store_config_file
            assert self.distributed_config is not None, \
                "distributed object store ('object_store = distributed') " \
                "requires a config file, please set one in " \
                "'distributed_object_store_config_file')"
        self.backends = {}
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = 0.0
        random.seed()
        self.__parse_distributed_config(config, config_xml)
        self.sleeper = None
        if fsmon and ( self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0] ):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.setDaemon( True )
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")
Author: ashvark, Project: galaxy, Lines: 37, Source: __init__.py

Example 12: DistributedObjectStore

class DistributedObjectStore(NestedObjectStore):

    """
    ObjectStore that defers to a list of backends.

    When getting objects the first store where the object exists is used.
    When creating objects they are created in a store selected randomly, but
    with weighting.
    """

    def __init__(self, config, config_xml=None, fsmon=False):
        """
        :type config: object
        :param config: An object, most likely populated from
            `galaxy/config.ini`, having the same attributes needed by
            :class:`NestedObjectStore` plus:

            * distributed_object_store_config_file

        :type config_xml: ElementTree

        :type fsmon: bool
        :param fsmon: If True, monitor the file system for free space,
            removing backends when they get too full.
        """
        super(DistributedObjectStore, self).__init__(config,
                config_xml=config_xml)
        if config_xml is None:
            self.distributed_config = config.distributed_object_store_config_file
            assert self.distributed_config is not None, \
                "distributed object store ('object_store = distributed') " \
                "requires a config file, please set one in " \
                "'distributed_object_store_config_file')"
        self.backends = {}
        self.weighted_backend_ids = []
        self.original_weighted_backend_ids = []
        self.max_percent_full = {}
        self.global_max_percent_full = 0.0
        random.seed()
        self.__parse_distributed_config(config, config_xml)
        self.sleeper = None
        if fsmon and ( self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0] ):
            self.sleeper = Sleeper()
            self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
            self.filesystem_monitor_thread.setDaemon( True )
            self.filesystem_monitor_thread.start()
            log.info("Filesystem space monitor started")

    def __parse_distributed_config(self, config, config_xml=None):
        if config_xml is None:
            root = ElementTree.parse(self.distributed_config).getroot()
            log.debug('Loading backends for distributed object store from %s', self.distributed_config)
        else:
            root = config_xml.find('backends')
            log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
        self.global_max_percent_full = float(root.get('maxpctfull', 0))
        for elem in [ e for e in root if e.tag == 'backend' ]:
            id = elem.get('id')
            weight = int(elem.get('weight', 1))
            maxpctfull = float(elem.get('maxpctfull', 0))
            if elem.get('type', 'disk'):
                path = None
                extra_dirs = {}
                for sub in elem:
                    if sub.tag == 'files_dir':
                        path = sub.get('path')
                    elif sub.tag == 'extra_dir':
                        type = sub.get('type')
                        extra_dirs[type] = sub.get('path')
                self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
                self.max_percent_full[id] = maxpctfull
                log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (id, weight, path))
                if extra_dirs:
                    log.debug("    Extra directories:")
                    for type, dir in extra_dirs.items():
                        log.debug("        %s: %s" % (type, dir))
            for i in range(0, weight):
                # The simplest way to do weighting: add backend ids to a
                # sequence the number of times equalling weight, then randomly
                # choose a backend from that sequence at creation
                self.weighted_backend_ids.append(id)
        self.original_weighted_backend_ids = self.weighted_backend_ids

    def shutdown(self):
        """Shut down. Kill the free space monitor if there is one."""
        super(DistributedObjectStore, self).shutdown()
        if self.sleeper is not None:
            self.sleeper.wake()

    def __filesystem_monitor(self):
        while self.running:
            new_weighted_backend_ids = self.original_weighted_backend_ids
            for id, backend in self.backends.items():
                maxpct = self.max_percent_full[id] or self.global_max_percent_full
                pct = backend.get_store_usage_percent()
                if pct > maxpct:
                    new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
            self.weighted_backend_ids = new_weighted_backend_ids
            self.sleeper.sleep(120)  # Test free space every 2 minutes

# ... (remainder of this class omitted) ...
Author: ashvark, Project: galaxy, Lines: 101, Source: __init__.py
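The weighting comment in __parse_distributed_config describes the whole selection technique: each backend id is appended to weighted_backend_ids once per unit of weight, so a uniform random.choice over that list yields a weighted draw over backends. The create() call that performs the draw falls in the omitted portion above; here is a standalone sketch of the idea, with made-up ids and weights:

    import random

    # Mirror how __parse_distributed_config repeats each id `weight` times:
    # 'big_s3' has weight 3, 'local_disk' has weight 1.
    weighted_backend_ids = ['big_s3'] * 3 + ['local_disk'] * 1

    # ~75% of new objects land on 'big_s3', ~25% on 'local_disk'
    backend_id = random.choice(weighted_backend_ids)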

Example 13: JobHandlerStopQueue

class JobHandlerStopQueue( object ):
    """
    A queue for jobs which need to be terminated prematurely.
    """
    STOP_SIGNAL = object()
    def __init__( self, app, dispatcher ):
        self.app = app
        self.dispatcher = dispatcher

        self.sa_session = app.model.context

        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()

        # Contains jobs that are waiting (only use from monitor thread)
        self.waiting = []

        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.monitor_thread = threading.Thread( name="JobHandlerStopQueue.monitor_thread", target=self.monitor )
        self.monitor_thread.setDaemon( True )
        self.monitor_thread.start()
        log.info( "job handler stop queue started" )

    def monitor( self ):
        """
        Continually iterate the waiting jobs, stop any that are found.
        """
        # HACK: Delay until after forking, we need a way to do post fork notification!!!
        time.sleep( 10 )
        while self.running:
            try:
                self.monitor_step()
            except:
                log.exception( "Exception in monitor_step" )
            # Sleep
            self.sleeper.sleep( 1 )

    def monitor_step( self ):
        """
        Called repeatedly by `monitor` to stop jobs.
        """
        # Pull all new jobs from the queue at once
        jobs_to_check = []
        if self.app.config.track_jobs_in_database:
            # Clear the session so we get fresh states for job and all datasets
            self.sa_session.expunge_all()
            # Fetch all new jobs
            newly_deleted_jobs = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                                     .filter( ( model.Job.state == model.Job.states.DELETED_NEW ) \
                                              & ( model.Job.handler == self.app.config.server_name ) ).all()
            for job in newly_deleted_jobs:
                jobs_to_check.append( ( job, job.stderr ) )
        # Also pull from the queue (in the case of Administrative stopped jobs)
        try:
            while 1:
                message = self.queue.get_nowait()
                if message is self.STOP_SIGNAL:
                    return
                # Unpack the message
                job_id, error_msg = message
                # Get the job object and append to watch queue
                jobs_to_check.append( ( self.sa_session.query( model.Job ).get( job_id ), error_msg ) )
        except Empty:
            pass
        for job, error_msg in jobs_to_check:
            if error_msg is not None:
                job.state = job.states.ERROR
                job.info = error_msg
            else:
                job.state = job.states.DELETED
            self.sa_session.add( job )
            self.sa_session.flush()
            if job.job_runner_name is not None:
                # tell the dispatcher to stop the job
                self.dispatcher.stop( job )

    def put( self, job_id, error_msg=None ):
        self.queue.put( ( job_id, error_msg ) )

    def shutdown( self ):
        """Attempts to gracefully shut down the worker thread"""
        if self.parent_pid != os.getpid():
            # We're not the real job queue, do nothing
            return
        else:
            log.info( "sending stop signal to worker thread" )
            self.running = False
            if not self.app.config.track_jobs_in_database:
                self.queue.put( self.STOP_SIGNAL )
            self.sleeper.wake()
            log.info( "job handler stop queue stopped" )
Author: jj-umn, Project: galaxy-central, Lines: 96, Source: handler.py
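Taken together, JobHandlerStopQueue implements a producer/consumer shutdown protocol: callers put() job ids onto the queue, the daemonized monitor thread drains it once per second, and shutdown() flips running, enqueues the STOP_SIGNAL sentinel, and wakes the sleeper so the loop notices immediately rather than after its next one-second sleep. A hypothetical caller, assuming app and dispatcher objects from the surrounding Galaxy process:

    # Hypothetical usage; `app` and `dispatcher` are assumed to exist
    stop_queue = JobHandlerStopQueue(app, dispatcher)

    # Ask the monitor thread to terminate job 42 with an error message
    stop_queue.put(42, error_msg='Canceled by an administrator')

    # On exit: stops the loop and wakes any in-progress sleeper.sleep(1)
    stop_queue.shutdown()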

Example 14: JobHandlerQueue

class JobHandlerQueue( object ):
    """
    Job Handler's Internal Queue, this is what actually implements waiting for
    jobs to be runnable and dispatching to a JobRunner.
    """
    STOP_SIGNAL = object()

    def __init__( self, app, dispatcher ):
        """Initializes the Job Handler Queue, creates (unstarted) monitoring thread"""
        self.app = app
        self.dispatcher = dispatcher

        self.sa_session = app.model.context
        self.track_jobs_in_database = self.app.config.track_jobs_in_database

        # Initialize structures for handling job limits
        self.__clear_job_count()

        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        self.waiting_jobs = []
        # Contains wrappers of jobs that are limited or ready (so they aren't created unnecessarily/multiple times)
        self.job_wrappers = {}
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.monitor_thread = threading.Thread( name="JobHandlerQueue.monitor_thread", target=self.__monitor )
        self.monitor_thread.setDaemon( True )

    def start( self ):
        """
        Starts the JobHandler's thread after checking for any unhandled jobs.
        """
        # Recover jobs at startup
        self.__check_jobs_at_startup()
        # Start the queue
        self.monitor_thread.start()
        log.info( "job handler queue started" )

    def job_wrapper( self, job, use_persisted_destination=False ):
        return JobWrapper( job, self, use_persisted_destination=use_persisted_destination )

    def job_pair_for_id( self, id ):
        job = self.sa_session.query( model.Job ).get( id )
        return job, self.job_wrapper( job, use_persisted_destination=True )

    def __check_jobs_at_startup( self ):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the database and requeues or cleans up as necessary.  Only run as the
        job handler starts.
        In case the activation is enforced it will filter out the jobs of inactive users.
        """
        jobs_at_startup = []
        if self.track_jobs_in_database:
            in_list = ( model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        else:
            in_list = ( model.Job.states.NEW,
                        model.Job.states.QUEUED,
                        model.Job.states.RUNNING )
        if self.app.config.user_activation_on:
                jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                    .outerjoin( model.User ) \
                    .filter( model.Job.state.in_( in_list ) &
                             ( model.Job.handler == self.app.config.server_name ) &
                             or_( ( model.Job.user_id == null() ), ( model.User.active == true() ) ) ).all()
        else:
            jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                .filter( model.Job.state.in_( in_list ) &
                         ( model.Job.handler == self.app.config.server_name ) ).all()

        for job in jobs_at_startup:
            if not self.app.toolbox.has_tool( job.tool_id, job.tool_version, exact=True ):
                log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
                self.job_wrapper( job ).fail( 'This tool was disabled before the job completed.  Please contact your Galaxy administrator.' )
            elif job.job_runner_name is not None and job.job_runner_external_id is None:
                # This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
                log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
                job.job_runner_name = None
                if self.track_jobs_in_database:
                    job.set_state( model.Job.states.NEW )
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
                # This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
                job_wrapper = self.job_wrapper( job )
                job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
                if job_destination.id is None:
                    job_destination.id = 'legacy_url'
                job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
                self.dispatcher.recover( job, job_wrapper )
                log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
            elif job.job_runner_name is None:
                # Never (fully) dispatched
                log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
# ... (remainder of this class omitted) ...
Author: ashvark, Project: galaxy, Lines: 101, Source: handler.py

Example 15: JobHandlerQueue

class JobHandlerQueue( object ):
    """
    Job manager, waits for jobs to be runnable and then dispatches to
    a JobRunner.
    """
    STOP_SIGNAL = object()

    def __init__( self, app, dispatcher ):
        """Start the job manager"""
        self.app = app
        self.dispatcher = dispatcher

        self.sa_session = app.model.context
        self.track_jobs_in_database = self.app.config.track_jobs_in_database

        # Initialize structures for handling job limits
        self.__clear_user_job_count()

        # Keep track of the pid that started the job manager, only it
        # has valid threads
        self.parent_pid = os.getpid()
        # Contains new jobs. Note this is not used if track_jobs_in_database is True
        self.queue = Queue()
        # Contains jobs that are waiting (only use from monitor thread)
        self.waiting_jobs = []
        # Contains wrappers of jobs that are limited or ready (so they aren't created unnecessarily/multiple times)
        self.job_wrappers = {}
        # Helper for interruptable sleep
        self.sleeper = Sleeper()
        self.running = True
        self.monitor_thread = threading.Thread( name="JobHandlerQueue.monitor_thread", target=self.__monitor )
        self.monitor_thread.setDaemon( True )

    def start( self ):
        """
        The JobManager should start, and then start its Handler, if it has one.
        """
        # Recover jobs at startup
        self.__check_jobs_at_startup()
        # Start the queue
        self.monitor_thread.start()
        log.info( "job handler queue started" )

    def __check_jobs_at_startup( self ):
        """
        Checks all jobs that are in the 'new', 'queued' or 'running' state in
        the database and requeues or cleans up as necessary.  Only run as the
        job handler starts.
        In case the activation is enforced it will filter out the jobs of inactive users.
        """
        jobs_at_startup = []
        if self.app.config.user_activation_on:
                jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                                  .outerjoin( model.User ) \
                                  .filter( ( ( model.Job.state == model.Job.states.NEW ) \
                                             | ( model.Job.state == model.Job.states.RUNNING ) \
                                             | ( model.Job.state == model.Job.states.QUEUED ) ) \
                                           & ( model.Job.handler == self.app.config.server_name ) \
                                           & or_( ( model.Job.user_id == None ),( model.User.active == True ) ) ).all()
        else:
            jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
                              .filter( ( ( model.Job.state == model.Job.states.NEW ) \
                                         | ( model.Job.state == model.Job.states.RUNNING ) \
                                         | ( model.Job.state == model.Job.states.QUEUED ) ) \
                                       & ( model.Job.handler == self.app.config.server_name ) ).all()
                
        for job in jobs_at_startup:
            if job.tool_id not in self.app.toolbox.tools_by_id:
                log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
                JobWrapper( job, self ).fail( 'This tool was disabled before the job completed.  Please contact your Galaxy administrator.' )
            if job.job_runner_name is not None and job.job_runner_external_id is None:
                # This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
                log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
                job.job_runner_name = None
                if self.track_jobs_in_database:
                    job.state = model.Job.states.NEW
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
                # This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
                # TODO: test me extensively
                job_wrapper = JobWrapper( job, self )
                job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
                if job_destination.id is None:
                    job_destination.id = 'legacy_url'
                job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
                self.dispatcher.recover( job, job_wrapper )
                log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
            elif job.job_runner_name is None:
                # Never (fully) dispatched
                log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
                if self.track_jobs_in_database:
                    job.state = model.Job.states.NEW
                else:
                    self.queue.put( ( job.id, job.tool_id ) )
            else:
                # Already dispatched and running
                job_wrapper = JobWrapper( job, self )
                job_wrapper.job_runner_mapper.cached_job_destination = JobDestination(id=job.destination_id, runner=job.job_runner_name, params=job.destination_params)
                self.dispatcher.recover( job, job_wrapper )
# ... (remainder of this class omitted) ...
Author: jj-umn, Project: galaxy-central, Lines: 101, Source: handler.py


Note: The galaxy.util.sleeper.Sleeper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's License before distributing or using the code, and do not republish without permission.