本文整理匯總了Python中rq.worker.Worker類的典型用法代碼示例。如果您正苦於以下問題:Python Worker類的具體用法?Python Worker怎麽用?Python Worker使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了Worker類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_job_access_within_job_function
def test_job_access_within_job_function(self):
    """The current job is accessible within the job function."""
    queue = Queue()
    # fixtures.access_self calls get_current_job() inside the worker and
    # asserts on the result; a failure would land on the failed queue.
    queue.enqueue(fixtures.access_self)
    burst_worker = Worker([queue])
    burst_worker.work(burst=True)
    # Nothing failed, so the failed queue stays empty.
    assert get_failed_queue(self.testconn).count == 0
示例2: test_all_queues
def test_all_queues(self):
    """Queue.all() lists every queue that ever received a job, even after it
    has been drained.

    Fixes: ``assertEquals`` is a deprecated alias (removed in Python 3.12) —
    replaced with ``assertEqual``; membership checks use ``assertIn`` for
    better failure messages.
    """
    q1 = Queue('first-queue')
    q2 = Queue('second-queue')
    q3 = Queue('third-queue')

    # A queue is only registered once a job is enqueued on it.
    self.assertEqual(len(Queue.all()), 0)
    q1.enqueue(say_hello)
    self.assertEqual(len(Queue.all()), 1)

    # Ensure this holds true for multiple queues.
    q2.enqueue(say_hello)
    q3.enqueue(say_hello)
    names = [q.name for q in Queue.all()]
    self.assertEqual(len(Queue.all()), 3)

    # Verify names.
    self.assertIn('first-queue', names)
    self.assertIn('second-queue', names)
    self.assertIn('third-queue', names)

    # Draining two queues must not unregister them.
    w = Worker([q2, q3])
    w.work(burst=True)
    self.assertEqual(len(Queue.all()), 3)
示例3: test_job_access_within_job_function
def test_job_access_within_job_function(self):
    """The current job is accessible within the job function."""
    queue = Queue()
    # fixtures.access_self invokes get_current_job() in the worker process.
    enqueued = queue.enqueue(fixtures.access_self)
    Worker([queue]).work(burst=True)
    # A FINISHED status proves get_current_job() succeeded without raising.
    self.assertEqual(enqueued.get_status(), JobStatus.FINISHED)
示例4: test_job_execution
def test_job_execution(self):
    """Job is removed from StartedJobRegistry after execution."""
    registry = StartedJobRegistry(connection=self.testconn)
    queue = Queue(connection=self.testconn)
    wrk = Worker([queue])

    # Successful job: registered while running, cleared once finished.
    ok_job = queue.enqueue(say_hello)
    self.assertTrue(ok_job.is_queued)
    wrk.prepare_job_execution(ok_job)
    self.assertIn(ok_job.id, registry.get_job_ids())
    self.assertTrue(ok_job.is_started)
    wrk.perform_job(ok_job, queue)
    self.assertNotIn(ok_job.id, registry.get_job_ids())
    self.assertTrue(ok_job.is_finished)

    # Failing job: must also be cleared from the registry afterwards.
    bad_job = queue.enqueue(div_by_zero)
    wrk.prepare_job_execution(bad_job)
    self.assertIn(bad_job.id, registry.get_job_ids())
    wrk.perform_job(bad_job, queue)
    self.assertNotIn(bad_job.id, registry.get_job_ids())
示例5: test_job_deletion
def test_job_deletion(self):
    """Ensure job is removed from StartedJobRegistry when deleted."""
    registry = StartedJobRegistry(connection=self.testconn)
    queue = Queue(connection=self.testconn)
    active_worker = Worker([queue])

    pending = queue.enqueue(say_hello)
    self.assertTrue(pending.is_queued)

    # Starting execution puts the job into the registry...
    active_worker.prepare_job_execution(pending)
    self.assertIn(pending.id, registry.get_job_ids())

    # ...and deleting it mid-flight must pull it back out.
    pending.delete()
    self.assertNotIn(pending.id, registry.get_job_ids())
示例6: test_jobs_are_put_in_registry
def test_jobs_are_put_in_registry(self):
    """Completed jobs are added to FinishedJobRegistry."""
    self.assertEqual(self.registry.get_job_ids(), [])
    queue = Queue(connection=self.testconn)
    wrk = Worker([queue])

    # A job that completes shows up in the finished registry.
    done = queue.enqueue(say_hello)
    wrk.perform_job(done)
    self.assertEqual(self.registry.get_job_ids(), [done.id])

    # A job that raises does not; the registry is unchanged.
    broken = queue.enqueue(div_by_zero)
    wrk.perform_job(broken)
    self.assertEqual(self.registry.get_job_ids(), [done.id])
示例7: worker
def worker(worker_num, backend):
    """Run one benchmark worker against the selected queue backend."""
    import subprocess
    print("Worker %i started" % worker_num)
    if backend == "pq":
        # pq drains via Django's management command (-b = burst mode).
        subprocess.call("django-admin.py pqworker benchmark -b", shell=True)
    elif backend == "rq":
        from redis import Redis
        from rq import Queue
        from rq.worker import Worker
        # Non-burst rq worker: keeps consuming until terminated externally.
        benchmark_queue = Queue("benchmark", connection=Redis())
        Worker(benchmark_queue, connection=Redis()).work(burst=False)
    print("Worker %i fin" % worker_num)
示例8: worker_details
def worker_details(request, queue_index, key):
    """Render the detail page for a single RQ worker."""
    queue_index = int(queue_index)
    queue = get_queue_by_index(queue_index)
    worker = Worker.find_by_key(key, connection=queue.connection)

    try:
        # Convert microseconds to milliseconds
        worker.total_working_time = worker.total_working_time / 1000
    except AttributeError:
        # Older versions of rq do not expose `total_working_time`.
        worker.total_working_time = "-"

    queue_names = ', '.join(worker.queue_names())

    def get_job_graceful(active_worker):
        # The current job may vanish between lookups; treat that as "none".
        if not active_worker:
            return None
        try:
            return active_worker.get_current_job()
        except NoSuchJobError:
            return None

    context_data = {
        'queue': queue,
        'queue_index': queue_index,
        'worker': worker,
        'queue_names': queue_names,
        'job': get_job_graceful(worker),
    }
    return render(request, 'django_rq/worker_details.html', context_data)
示例9: workers
def workers():
    """Print a queue-name -> worker-count summary for all live workers."""
    import pprint
    tally = Counter()
    # Every live worker contributes one count per queue it listens on.
    for active in Worker.all(connection=worker.connection):
        for listened_queue in active.queues:
            tally[listened_queue.name] += 1
    pprint.pprint(dict(tally))
示例10: worker
def worker(worker_num, backend):
    """Spawn a single benchmark worker using either the pq or rq backend."""
    import subprocess
    print('Worker %i started' % worker_num)
    if backend == 'pq':
        # Burst-mode pq worker through Django's management command.
        subprocess.call(
            'django-admin.py pqworker benchmark -b', shell=True)
    elif backend == 'rq':
        from rq.worker import Worker
        from redis import Redis
        from rq import Queue
        rq_queue = Queue('benchmark', connection=Redis())
        # burst=False: worker keeps running until killed by the harness.
        rq_worker = Worker(rq_queue, connection=Redis())
        rq_worker.work(burst=False)
    print('Worker %i fin' % worker_num)
示例11: workers
def workers():
    """Show information on salactus workers. (slow)"""
    import pprint
    per_queue = Counter()
    # Count how many live workers are listening on each queue.
    for live_worker in Worker.all(connection=worker.connection):
        for q in live_worker.queues:
            per_queue[q.name] += 1
    pprint.pprint(dict(per_queue))
示例12: test_requeue
def test_requeue(self):
    """FailedJobRegistry.requeue works properly"""
    queue = Queue(connection=self.testconn)
    job = queue.enqueue(div_by_zero, failure_ttl=5)

    worker = Worker([queue])
    worker.work(burst=True)

    registry = FailedJobRegistry(connection=worker.connection)
    self.assertTrue(job in registry)

    def assert_requeued_then_failed_again():
        # After a requeue the job leaves the failed registry, sits on the
        # queue as QUEUED, and lands back in the registry once re-run.
        self.assertFalse(job in registry)
        self.assertIn(job.id, queue.get_job_ids())
        job.refresh()
        self.assertEqual(job.get_status(), JobStatus.QUEUED)
        worker.work(burst=True)
        self.assertTrue(job in registry)

    # Requeue by job id.
    registry.requeue(job.id)
    assert_requeued_then_failed_again()

    # Should also work with a job instance.
    registry.requeue(job)
    assert_requeued_then_failed_again()

    # requeue_job should work the same way.
    requeue_job(job.id, connection=self.testconn)
    assert_requeued_then_failed_again()

    # And so does job.requeue().
    job.requeue()
    self.assertFalse(job in registry)
    self.assertIn(job.id, queue.get_job_ids())
    job.refresh()
    self.assertEqual(job.get_status(), JobStatus.QUEUED)
示例13: test_jobs_are_put_in_registry
def test_jobs_are_put_in_registry(self):
    """Completed jobs are added to FinishedJobRegistry."""
    self.assertEqual(self.registry.get_job_ids(), [])
    queue = Queue(connection=self.testconn)
    wrk = Worker([queue])

    # Completing a job lands it in the finished registry.
    done = queue.enqueue(say_hello)
    wrk.perform_job(done, queue)
    self.assertEqual(self.registry.get_job_ids(), [done.id])

    # Deleting the job removes it from the registry again.
    self.assertEqual(done.get_status(), JobStatus.FINISHED)
    done.delete()
    self.assertEqual(self.registry.get_job_ids(), [])

    # A failing job never enters the finished registry.
    broken = queue.enqueue(div_by_zero)
    wrk.perform_job(broken, queue)
    self.assertEqual(self.registry.get_job_ids(), [])
示例14: test_worker_handle_job_failure
def test_worker_handle_job_failure(self):
    """Failed jobs are added to FailedJobRegistry"""
    queue = Queue(connection=self.testconn)
    wrk = Worker([queue])
    registry = FailedJobRegistry(connection=wrk.connection)
    started_at = current_timestamp()

    # job is added to FailedJobRegistry with default failure ttl
    failed = queue.enqueue(div_by_zero, failure_ttl=5)
    wrk.handle_job_failure(failed)
    self.assertIn(failed.id, registry.get_job_ids())
    self.assertLess(
        self.testconn.zscore(registry.key, failed.id),
        started_at + DEFAULT_FAILURE_TTL + 5)

    # job is added to FailedJobRegistry with specified ttl
    failed = queue.enqueue(div_by_zero, failure_ttl=5)
    wrk.handle_job_failure(failed)
    self.assertLess(
        self.testconn.zscore(registry.key, failed.id), started_at + 7)
示例15: get_statistics
def get_statistics():
    """Collect per-queue statistics (job counts, registry sizes, oldest job
    timestamp, connection info) for every configured queue.

    Returns a dict of the form ``{'queues': [queue_data, ...]}``.

    Fix: the original popped keys directly from
    ``connection.connection_pool.connection_kwargs`` — that dict is the
    connection pool's live configuration, so mutating it corrupts the pool
    for every later connection. We now work on a copy.
    """
    queues = []
    for index, config in enumerate(QUEUES_LIST):
        queue = get_queue_by_index(index)
        connection = queue.connection
        # Copy before mutating: this dict is shared with the live pool.
        connection_kwargs = dict(connection.connection_pool.connection_kwargs)

        # Raw access to the first item from left of the redis list.
        # This might not be accurate since new job can be added from the left
        # with `at_front` parameters.
        # Ideally rq should support Queue.oldest_job
        last_job_id = connection.lindex(queue.key, 0)
        last_job = queue.fetch_job(last_job_id.decode('utf-8')) if last_job_id else None
        if last_job:
            oldest_job_timestamp = to_localtime(last_job.enqueued_at)\
                .strftime('%Y-%m-%d, %H:%M:%S')
        else:
            oldest_job_timestamp = "-"

        # parser_class and connection_pool are not needed and not JSON serializable
        connection_kwargs.pop('parser_class', None)
        connection_kwargs.pop('connection_pool', None)

        queue_data = {
            'name': queue.name,
            'jobs': queue.count,
            'oldest_job_timestamp': oldest_job_timestamp,
            'index': index,
            'connection_kwargs': connection_kwargs
        }
        if queue.name == 'failed':
            # The legacy failed queue has no workers or registries of its own.
            queue_data['workers'] = '-'
            queue_data['finished_jobs'] = '-'
            queue_data['started_jobs'] = '-'
            queue_data['deferred_jobs'] = '-'
        else:
            connection = get_connection(queue.name)
            queue_data['workers'] = Worker.count(queue=queue)

            finished_job_registry = FinishedJobRegistry(queue.name, connection)
            started_job_registry = StartedJobRegistry(queue.name, connection)
            deferred_job_registry = DeferredJobRegistry(queue.name, connection)
            failed_job_registry = FailedJobRegistry(queue.name, connection)
            queue_data['finished_jobs'] = len(finished_job_registry)
            queue_data['started_jobs'] = len(started_job_registry)
            queue_data['deferred_jobs'] = len(deferred_job_registry)
            queue_data['failed_jobs'] = len(failed_job_registry)

        queues.append(queue_data)
    return {'queues': queues}