This article collects typical usage examples of the Sleeper.wake method from the Python module galaxy.util.sleeper. If you have been wondering what Sleeper.wake does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the containing class, galaxy.util.sleeper.Sleeper.
Five code examples of the Sleeper.wake method are shown below, sorted by popularity by default.
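All five examples follow the same pattern: a background thread loops on a running flag and parks in Sleeper.sleep(), and a shutdown path calls Sleeper.wake() to cut that sleep short. The sketch below is not Galaxy's actual implementation, just a minimal equivalent built on threading.Event that shows the sleep/wake contract the examples rely on.

import threading

class Sleeper(object):
    """Minimal sketch of an interruptible sleep (not Galaxy's implementation).
    sleep(seconds) blocks until the timeout elapses or another thread
    calls wake()."""

    def __init__(self):
        self._event = threading.Event()

    def sleep(self, seconds):
        # Returns early if wake() is called before the timeout expires
        self._event.wait(seconds)
        self._event.clear()

    def wake(self):
        # Release any thread currently blocked in sleep()
        self._event.set()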
Example 1: DistributedObjectStore
# Required import: from galaxy.util.sleeper import Sleeper [as alias]
# Or: from galaxy.util.sleeper.Sleeper import wake [as alias]
class DistributedObjectStore(NestedObjectStore):
"""
ObjectStore that defers to a list of backends.
When getting objects the first store where the object exists is used.
When creating objects they are created in a store selected randomly, but
with weighting.
"""
def __init__(self, config, config_xml=None, fsmon=False):
"""
:type config: object
:param config: An object, most likely populated from
`galaxy/config.ini`, having the same attributes needed by
:class:`NestedObjectStore` plus:
* distributed_object_store_config_file
:type config_xml: ElementTree
:type fsmon: bool
:param fsmon: If True, monitor the file system for free space,
removing backends when they get too full.
"""
super(DistributedObjectStore, self).__init__(config,
config_xml=config_xml)
if config_xml is None:
self.distributed_config = config.distributed_object_store_config_file
assert self.distributed_config is not None, \
"distributed object store ('object_store = distributed') " \
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
self.backends = {}
self.weighted_backend_ids = []
self.original_weighted_backend_ids = []
self.max_percent_full = {}
self.global_max_percent_full = 0.0
random.seed()
self.__parse_distributed_config(config, config_xml)
self.sleeper = None
if fsmon and ( self.global_max_percent_full or [_ for _ in self.max_percent_full.values() if _ != 0.0] ):
self.sleeper = Sleeper()
self.filesystem_monitor_thread = threading.Thread(target=self.__filesystem_monitor)
self.filesystem_monitor_thread.setDaemon( True )
self.filesystem_monitor_thread.start()
log.info("Filesystem space monitor started")
def __parse_distributed_config(self, config, config_xml=None):
if config_xml is None:
root = ElementTree.parse(self.distributed_config).getroot()
log.debug('Loading backends for distributed object store from %s', self.distributed_config)
else:
root = config_xml.find('backends')
log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
self.global_max_percent_full = float(root.get('maxpctfull', 0))
for elem in [ e for e in root if e.tag == 'backend' ]:
id = elem.get('id')
weight = int(elem.get('weight', 1))
maxpctfull = float(elem.get('maxpctfull', 0))
if elem.get('type', 'disk') == 'disk':
path = None
extra_dirs = {}
for sub in elem:
if sub.tag == 'files_dir':
path = sub.get('path')
elif sub.tag == 'extra_dir':
type = sub.get('type')
extra_dirs[type] = sub.get('path')
self.backends[id] = DiskObjectStore(config, file_path=path, extra_dirs=extra_dirs)
self.max_percent_full[id] = maxpctfull
log.debug("Loaded disk backend '%s' with weight %s and file_path: %s" % (id, weight, path))
if extra_dirs:
log.debug(" Extra directories:")
for type, dir in extra_dirs.items():
log.debug(" %s: %s" % (type, dir))
for i in range(0, weight):
# The simplest way to do weighting: add backend ids to a
# sequence the number of times equalling weight, then randomly
# choose a backend from that sequence at creation
self.weighted_backend_ids.append(id)
self.original_weighted_backend_ids = self.weighted_backend_ids
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super(DistributedObjectStore, self).shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
#.........some code omitted here.........
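Aside from the monitor thread, the weighting scheme in this example deserves a closer look: __parse_distributed_config appends each backend id to the list once per unit of weight, so a plain random.choice over that list produces a weighted draw. A standalone sketch, with made-up backend ids and weights:

import random

# Hypothetical backend weights, as a distributed config might declare them
weights = {'files1': 3, 'files2': 1}

weighted_backend_ids = []
for backend_id, weight in weights.items():
    weighted_backend_ids.extend([backend_id] * weight)

# 'files1' is now drawn roughly 75% of the time, 'files2' roughly 25%
print(random.choice(weighted_backend_ids))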
Example 2: JobHandlerStopQueue
# Required import: from galaxy.util.sleeper import Sleeper [as alias]
# Or: from galaxy.util.sleeper.Sleeper import wake [as alias]
class JobHandlerStopQueue( object ):
"""
A queue for jobs which need to be terminated prematurely.
"""
STOP_SIGNAL = object()
def __init__( self, app, dispatcher ):
self.app = app
self.dispatcher = dispatcher
self.sa_session = app.model.context
# Keep track of the pid that started the job manager, only it
# has valid threads
self.parent_pid = os.getpid()
# Contains new jobs. Note this is not used if track_jobs_in_database is True
self.queue = Queue()
# Contains jobs that are waiting (only use from monitor thread)
self.waiting = []
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( name="JobHandlerStopQueue.monitor_thread", target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
log.info( "job handler stop queue started" )
def monitor( self ):
"""
Continually iterate the waiting jobs, stop any that are found.
"""
# HACK: Delay until after forking, we need a way to do post fork notification!!!
time.sleep( 10 )
while self.running:
try:
self.monitor_step()
except:
log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
def monitor_step( self ):
"""
Called repeatedly by `monitor` to stop jobs.
"""
# Pull all new jobs from the queue at once
jobs_to_check = []
if self.app.config.track_jobs_in_database:
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
newly_deleted_jobs = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( ( model.Job.state == model.Job.states.DELETED_NEW ) \
& ( model.Job.handler == self.app.config.server_name ) ).all()
for job in newly_deleted_jobs:
jobs_to_check.append( ( job, job.stderr ) )
# Also pull from the queue (in the case of Administrative stopped jobs)
try:
while 1:
message = self.queue.get_nowait()
if message is self.STOP_SIGNAL:
return
# Unpack the message
job_id, error_msg = message
# Get the job object and append to watch queue
jobs_to_check.append( ( self.sa_session.query( model.Job ).get( job_id ), error_msg ) )
except Empty:
pass
for job, error_msg in jobs_to_check:
if error_msg is not None:
job.state = job.states.ERROR
job.info = error_msg
else:
job.state = job.states.DELETED
self.sa_session.add( job )
self.sa_session.flush()
if job.job_runner_name is not None:
# tell the dispatcher to stop the job
self.dispatcher.stop( job )
def put( self, job_id, error_msg=None ):
self.queue.put( ( job_id, error_msg ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler stop queue stopped" )
Example 3: DeferredJobQueue
# Required import: from galaxy.util.sleeper import Sleeper [as alias]
# Or: from galaxy.util.sleeper.Sleeper import wake [as alias]
#.........some code omitted here.........
for obj in module.__all__:
display_name = ':'.join((module_name, obj))
plugin = getattr(module, obj)
for name in ('check_job', 'run_job'):
if name not in dir(plugin):
log.error('Plugin "%s" does not contain required method "%s()"' % (display_name, name))
break
else:
self.plugins[obj] = plugin(self.app)
self.plugins[obj].job_states = self.job_states
log.debug('Loaded deferred job plugin: %s' % display_name)
def __check_jobs_at_startup(self):
waiting_jobs = self.sa_session.query(model.DeferredJob) \
.filter(model.DeferredJob.state == model.DeferredJob.states.WAITING).all()
for job in waiting_jobs:
if not self.__check_job_plugin(job):
continue
if 'check_interval' in dir(self.plugins[job.plugin]):
job.check_interval = self.plugins[job.plugin].check_interval
log.info('Recovered deferred job (id: %s) at startup' % job.id)
# Pass the job ID as opposed to the job, since the monitor thread
# needs to load it in its own threadlocal scoped session.
self.waiting_jobs.append(job.id)
def __monitor(self):
while self.running:
try:
self.__monitor_step()
except:
log.exception('Exception in monitor_step')
self.sleeper.sleep(1)
log.info('job queue stopped')
def __monitor_step(self):
# TODO: Querying the database with this frequency is bad, we need message passing
new_jobs = self.sa_session.query(model.DeferredJob) \
.filter(model.DeferredJob.state == model.DeferredJob.states.NEW).all()
for job in new_jobs:
if not self.__check_job_plugin(job):
continue
job.state = model.DeferredJob.states.WAITING
self.sa_session.add(job)
self.sa_session.flush()
if 'check_interval' in dir(self.plugins[job.plugin]):
job.check_interval = self.plugins[job.plugin].check_interval
self.waiting_jobs.append(job)
new_waiting = []
for job in self.waiting_jobs:
try:
# Recovered jobs are passed in by ID
assert type(job) is int
job = self.sa_session.query(model.DeferredJob).get(job)
except:
pass
if job.is_check_time:
try:
job_state = self.plugins[job.plugin].check_job(job)
except Exception:
self.__fail_job(job)
log.exception('Set deferred job %s to error because of an exception in check_job()' % job.id)
continue
if job_state == self.job_states.READY:
try:
self.plugins[job.plugin].run_job(job)
except Exception:
self.__fail_job(job)
log.exception('Set deferred job %s to error because of an exception in run_job()' % job.id)
continue
elif job_state == self.job_states.INVALID:
self.__fail_job(job)
log.error('Unable to run deferred job (id: %s): Plugin "%s" marked it as invalid' % (job.id, job.plugin))
continue
else:
new_waiting.append(job)
job.last_check = 'now'
else:
new_waiting.append(job)
self.waiting_jobs = new_waiting
def __check_job_plugin(self, job):
if job.plugin not in self.plugins:
log.error('Invalid deferred job plugin: %s' % job.plugin)
job.state = model.DeferredJob.states.ERROR
self.sa_session.add(job)
self.sa_session.flush()
return False
return True
def __check_if_ready_to_run(self, job):
return self.plugins[job.plugin].check_job(job)
def __fail_job(self, job):
job.state = model.DeferredJob.states.ERROR
self.sa_session.add(job)
self.sa_session.flush()
def shutdown(self):
self.running = False
self.sleeper.wake()
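The plugin loader near the top of this example (the part that checks for check_job and run_job) leans on Python's for/else clause: the else branch runs only when the loop finishes without break, that is, when no required method was missing. A self-contained sketch with hypothetical plugin classes:

class GoodPlugin(object):
    def check_job(self, job):
        pass

    def run_job(self, job):
        pass

class BadPlugin(object):
    def check_job(self, job):  # run_job is deliberately missing
        pass

for plugin in (GoodPlugin, BadPlugin):
    for name in ('check_job', 'run_job'):
        if name not in dir(plugin):
            print('%s rejected: missing required method %s()' % (plugin.__name__, name))
            break
    else:
        # Reached only if the inner loop completed without break
        print('%s accepted' % plugin.__name__)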
Example 4: JobHandlerQueue
# Required import: from galaxy.util.sleeper import Sleeper [as alias]
# Or: from galaxy.util.sleeper.Sleeper import wake [as alias]
#.........some code omitted here.........
# limits
if job.user:
# Check the hard limit first
if self.app.job_config.limits.registered_user_concurrent_jobs:
count = self.get_user_job_count(job.user_id)
# Check the user's number of dispatched jobs against the overall limit
if count >= self.app.job_config.limits.registered_user_concurrent_jobs:
return JOB_WAIT
# If we pass the hard limit, also check the per-destination count
id = job_wrapper.job_destination.id
count_per_id = self.get_user_job_count_per_destination(job.user_id)
if id in self.app.job_config.limits.destination_user_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the user's number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_user_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[tag]:
return JOB_WAIT
elif job.galaxy_session:
# Anonymous users only get the hard limit
if self.app.job_config.limits.anonymous_user_concurrent_jobs:
count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( and_( model.Job.session_id == job.galaxy_session.id,
or_( model.Job.state == model.Job.states.RUNNING,
model.Job.state == model.Job.states.QUEUED ) ) ).count()
if count >= self.app.job_config.limits.anonymous_user_concurrent_jobs:
return JOB_WAIT
else:
log.warning( 'Job %s is not associated with a user or session so job concurrency limit cannot be checked.' % job.id )
return JOB_READY
def __cache_total_job_count_per_destination( self ):
# Cache the job count if necessary
if self.total_job_count_per_destination is None:
self.total_job_count_per_destination = {}
result = self.sa_session.execute(select([model.Job.table.c.destination_id, func.count(model.Job.table.c.destination_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.destination_id))
for row in result:
self.total_job_count_per_destination[row['destination_id']] = row['job_count']
def get_total_job_count_per_destination(self):
self.__cache_total_job_count_per_destination()
# Always use caching (at worst a job will have to wait one iteration,
# and this would be more fair anyway as it ensures FIFO scheduling,
# insofar as FIFO would be fair...)
return self.total_job_count_per_destination
def __check_destination_jobs( self, job, job_wrapper ):
if self.app.job_config.limits.destination_total_concurrent_jobs:
id = job_wrapper.job_destination.id
count_per_id = self.get_total_job_count_per_destination()
if id in self.app.job_config.limits.destination_total_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_total_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[tag]:
return JOB_WAIT
return JOB_READY
def put( self, job_id, tool_id ):
"""Add a job to the queue (by job identifier)"""
if not self.track_jobs_in_database:
self.queue.put( ( job_id, tool_id ) )
self.sleeper.wake()
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler queue stopped" )
self.dispatcher.shutdown()
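The tag-limit branches in this example's user- and destination-level checks both aggregate job counts across every destination sharing a tag before comparing against that tag's limit. A reduced sketch of the accounting, with made-up destination ids, tags, and limits:

# Hypothetical number of queued/running jobs per destination
count_per_id = {'cluster_a': 4, 'cluster_b': 3, 'local': 1}

# Destinations grouped by tag, and the concurrency limit for each tag
destinations_by_tag = {'cluster': ['cluster_a', 'cluster_b']}
tag_limits = {'cluster': 8}

for tag, dest_ids in destinations_by_tag.items():
    # Sum the per-destination counts to get the aggregate for this tag
    count = sum(count_per_id.get(dest_id, 0) for dest_id in dest_ids)
    if count >= tag_limits[tag]:
        print('tag %s at its limit (%d >= %d): JOB_WAIT' % (tag, count, tag_limits[tag]))
    else:
        print('tag %s has headroom (%d of %d): JOB_READY' % (tag, count, tag_limits[tag]))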
Example 5: DistributedObjectStore
# Required import: from galaxy.util.sleeper import Sleeper [as alias]
# Or: from galaxy.util.sleeper.Sleeper import wake [as alias]
#.........some code omitted here.........
}
backends.append(backend_dict)
return config_dict
@classmethod
def from_xml(clazz, config, config_xml, fsmon=False):
legacy = False
if config_xml is None:
distributed_config = config.distributed_object_store_config_file
assert distributed_config is not None, \
"distributed object store ('object_store = distributed') " \
"requires a config file, please set one in " \
"'distributed_object_store_config_file')"
log.debug('Loading backends for distributed object store from %s', distributed_config)
config_xml = ElementTree.parse(distributed_config).getroot()
legacy = True
else:
log.debug('Loading backends for distributed object store from %s', config_xml.get('id'))
config_dict = clazz.parse_xml(config_xml, legacy=legacy)
return clazz(config, config_dict, fsmon=fsmon)
def to_dict(self):
as_dict = super(DistributedObjectStore, self).to_dict()
as_dict["global_max_percent_full"] = self.global_max_percent_full
backends = []
for backend_id, backend in self.backends.items():
backend_as_dict = backend.to_dict()
backend_as_dict["id"] = backend_id
backend_as_dict["max_percent_full"] = self.max_percent_full[backend_id]
backend_as_dict["weight"] = len([i for i in self.original_weighted_backend_ids if i == backend_id])
backends.append(backend_as_dict)
as_dict["backends"] = backends
return as_dict
def shutdown(self):
"""Shut down. Kill the free space monitor if there is one."""
super(DistributedObjectStore, self).shutdown()
if self.sleeper is not None:
self.sleeper.wake()
def __filesystem_monitor(self):
while self.running:
new_weighted_backend_ids = self.original_weighted_backend_ids
for id, backend in self.backends.items():
maxpct = self.max_percent_full[id] or self.global_max_percent_full
pct = backend.get_store_usage_percent()
if pct > maxpct:
new_weighted_backend_ids = [_ for _ in new_weighted_backend_ids if _ != id]
self.weighted_backend_ids = new_weighted_backend_ids
self.sleeper.sleep(120) # Test free space every 2 minutes
def create(self, obj, **kwargs):
"""The only method in which obj.object_store_id may be None."""
if obj.object_store_id is None or not self.exists(obj, **kwargs):
if obj.object_store_id is None or obj.object_store_id not in self.backends:
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
raise ObjectInvalid('objectstore.create, could not generate '
'obj.object_store_id: %s, kwargs: %s'
% (str(obj), str(kwargs)))
_create_object_in_session(obj)
log.debug("Selected backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
else:
log.debug("Using preferred backend '%s' for creation of %s %s"
% (obj.object_store_id, obj.__class__.__name__, obj.id))
self.backends[obj.object_store_id].create(obj, **kwargs)
def _call_method(self, method, obj, default, default_is_exception, **kwargs):
object_store_id = self.__get_store_id_for(obj, **kwargs)
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
raise default('objectstore, _call_method failed: %s on %s, kwargs: %s'
% (method, self._repr_object_for_exception(obj), str(kwargs)))
else:
return default
def __get_store_id_for(self, obj, **kwargs):
if obj.object_store_id is not None:
if obj.object_store_id in self.backends:
return obj.object_store_id
else:
log.warning('The backend object store ID (%s) for %s object with ID %s is invalid'
% (obj.object_store_id, obj.__class__.__name__, obj.id))
# if this instance has been switched from a non-distributed to a
# distributed object store, or if the object's store id is invalid,
# try to locate the object
for id, store in self.backends.items():
if store.exists(obj, **kwargs):
log.warning('%s object with ID %s found in backend object store with ID %s'
% (obj.__class__.__name__, obj.id, id))
obj.object_store_id = id
_create_object_in_session(obj)
return id
return None
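Finally, _call_method resolves a method by name on the selected backend and invokes it; getattr(obj, name) is the idiomatic spelling of the __getattribute__ call used above. A minimal sketch of the same name-based delegation, with a hypothetical backend class:

class DiskBackend(object):
    """Hypothetical backend used only to illustrate the dispatch."""

    def update_from_file(self, obj, **kwargs):
        return 'updated %s on disk' % obj

backends = {'files1': DiskBackend()}

def call_method(method, store_id, obj, **kwargs):
    # Look the method up by name on the chosen backend, then call it
    return getattr(backends[store_id], method)(obj, **kwargs)

print(call_method('update_from_file', 'files1', 'dataset_1'))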