本文整理汇总了Python中celery.task.TaskSet类的典型用法代码示例。如果您正苦于以下问题:Python TaskSet类的具体用法?Python TaskSet怎么用?Python TaskSet使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TaskSet类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_counter_taskset
def test_counter_taskset(self):
    """Apply a 9-task set and verify every message and the final count."""
    # Reset the shared counter before running the set.
    increment_counter.count = 0

    # One bare signature plus increment_by=2..9 (same tasks as a long
    # literal list, built programmatically).
    signatures = [increment_counter.s()]
    signatures.extend(
        increment_counter.s(increment_by=n) for n in xrange(2, 10))
    ts = TaskSet(tasks=signatures)
    self.assertEqual(ts.total, 9)

    # Drain any stale messages from the queue first.
    consumer = increment_counter.get_consumer()
    consumer.purge()
    consumer.close()

    taskset_res = ts.apply_async()
    subtasks = taskset_res.subtasks
    taskset_id = taskset_res.taskset_id

    # Pull one message per subtask and replay it locally.
    consumer = increment_counter.get_consumer()
    for subtask in subtasks:
        m = consumer.queues[0].get().payload
        self.assertDictContainsSubset(
            {'taskset': taskset_id,
             'task': increment_counter.name,
             'id': subtask.id}, m)
        increment_counter(
            increment_by=m.get('kwargs', {}).get('increment_by'))

    # 1 (default) + 2 + ... + 9
    self.assertEqual(increment_counter.count, sum(xrange(1, 10)))
示例2: hotspotsRange_ts
def hotspotsRange_ts(start_time, stop_time, location, **kwargs):
    """Run over a range of timesteps at 5 minute intervals in between.

    Queues one hotspot-analysis subtask per timestep between start_time
    and stop_time (both formatted '%Y%m%d.%H%M%S') on the "gis" queue,
    and returns this request's task id as a string.
    """
    start = datetime.strptime(start_time, '%Y%m%d.%H%M%S')
    stop = datetime.strptime(stop_time, '%Y%m%d.%H%M%S')
    # NOTE(review): this reads hotspotsRange.request.id while the return
    # value below uses hotspotsRange_ts.request.id -- presumably the
    # parent task's id is intended here; confirm against the caller.
    kwargs.update({'task_id': hotspotsRange.request.id})
    job = TaskSet(tasks=[
        cybercomq.gis.hotspotpysal.hotspots.subtask(
            args=(ts, location), kwargs=kwargs, queue="gis",
            track_started=True)
        for ts in date_range(start, stop)
    ])
    # Bug fix: the original called job.apply_async(job), passing the
    # TaskSet itself as apply_async()'s first positional (connection)
    # parameter. apply_async() takes no TaskSet argument.
    job.apply_async()
    return '%s' % (hotspotsRange_ts.request.id)
示例3: handle
def handle(self, *args, **options):
    """Queue extraction/OCR tasks for RECAP documents in Celery batches.

    Options used: skip_ocr (bool), order ('small-first'/'big-first'),
    queue (Celery queue name), queue_length (batch size).
    """
    docs = RECAPDocument.objects.exclude(filepath_local='')
    if options['skip_ocr']:
        # Focus on the items that we don't know if they need OCR.
        docs = docs.filter(ocr_status=None)
    else:
        # We're doing OCR. Only work with those items that require it.
        docs = docs.filter(ocr_status=RECAPDocument.OCR_NEEDED)
    count = docs.count()
    print("There are %s documents to process." % count)

    if options.get('order') is not None:
        if options['order'] == 'small-first':
            docs = docs.order_by('page_count')
        elif options['order'] == 'big-first':
            docs = docs.order_by('-page_count')

    subtasks = []
    completed = 0
    for pk in docs.values_list('pk', flat=True):
        # Send the items off for processing.
        # Bug fix: the original tested `count == completed`, which is
        # never true inside the loop (completed is incremented at the
        # bottom, so it ranges 0..count-1), meaning a final partial
        # batch was never flushed. `completed + 1` matches the row
        # currently being handled.
        last_item = (count == completed + 1)
        subtasks.append(extract_recap_pdf.subtask(
            (pk, options['skip_ocr']),
            priority=5,
            queue=options['queue']
        ))
        # Every queue_length items (or on the last row), send the
        # accumulated subtasks to Celery.
        if (len(subtasks) >= options['queue_length']) or last_item:
            msg = ("Sent %s subtasks to celery. We have sent %s "
                   "items so far." % (len(subtasks), completed + 1))
            logger.info(msg)
            print(msg)
            job = TaskSet(tasks=subtasks)
            job.apply_async().join()
            subtasks = []
        completed += 1
示例4: make_download_tasks
def make_download_tasks(data, line_count, start_line):
    """For every item in the CSV, send it to Celery for processing"""
    previous_casenum = None
    subtasks = []
    completed = 0
    for index, item in data.iterrows():
        # Honor start_line by skipping rows that were already handled.
        if completed < start_line - 1:
            completed += 1
            continue
        last_item = (line_count == completed + 1)

        court = item['court']
        casenum = item['casenum']
        if casenum != previous_casenum:
            # New case, get the docket before getting the pdf
            logger.info("New docket found with casenum: %s" % casenum)
            previous_casenum = casenum
            docket_filename = get_docket_filename(court, casenum)
            docket_url = get_docketxml_url(court, casenum)
            subtasks.append(
                download_recap_item.subtask((docket_url, docket_filename)))

        # Get the document
        filename = get_document_filename(court, casenum,
                                         item['docnum'], item['subdocnum'])
        url = get_pdf_url(court, casenum, filename)
        subtasks.append(download_recap_item.subtask((url, filename)))

        # Every n items send the subtasks to Celery.
        if len(subtasks) >= 1000 or last_item:
            msg = ("Sent %s subtasks to celery. We have processed %s "
                   "rows so far." % (len(subtasks), completed + 1))
            logger.info(msg)
            print(msg)
            job = TaskSet(tasks=subtasks)
            job.apply_async().join()
            subtasks = []
        completed += 1
示例5: test_named_taskset
def test_named_taskset(self):
    """A taskset_id handed to apply() must survive into the result."""
    prefix = 'test_named_taskset-'
    result = TaskSet([return_True_task.subtask([1])]).apply(
        taskset_id=prefix + uuid())
    self.assertTrue(result.taskset_id.startswith(prefix))
示例6: test_function_taskset
def test_function_taskset(self):
    """Five return_True_task subtasks all come back True via join()."""
    sigs = []
    for n in range(1, 6):
        sigs.append(return_True_task.s(n))
    ts = TaskSet(sigs)
    res = ts.apply_async()
    self.assertListEqual(res.join(), [True] * 5)
示例7: run
def run(self, image_pk, destination_image_format, image_filepath, **kwargs):
    # Resize an uploaded image into four sizes, persist each size,
    # mark the Image row as uploaded, then delete the temp files.
    # Retries the whole task on any unhandled exception.
    #
    # image_pk: primary key of the Image row to update.
    # destination_image_format: format passed through to ResizeImageTask.
    # image_filepath: path to the original temporary upload on disk.
    logger = self.get_logger(**kwargs)
    task_id = kwargs.get("task_id", "")
    logger.info("Starting request %s. image_pk: %s, destination_image_format: %s, image_filepath: %s" %
                (task_id,
                 image_pk,
                 destination_image_format,
                 image_filepath))
    try:
        # -----------------------------------------------------------------
        # Resize the image into large, medium, and small images.
        # (Run locally and synchronously via apply(), not apply_async(),
        # so the results are available in-process below.)
        # -----------------------------------------------------------------
        logger.info("Starting resize tasks...")
        resize_tasks = TaskSet(tasks=[
            ResizeImageTask.subtask((image_filepath, destination_image_format, Image.image_xlarge_dimensions)),
            ResizeImageTask.subtask((image_filepath, destination_image_format, Image.image_large_dimensions)),
            ResizeImageTask.subtask((image_filepath, destination_image_format, Image.image_medium_dimensions)),
            ResizeImageTask.subtask((image_filepath, destination_image_format, Image.image_small_dimensions)),
        ])
        resize_tasks_results = resize_tasks.apply()
        if not resize_tasks_results.successful():
            logger.error("Error during resize tasks, bail out.")
            # Re-raise the first failed subtask's exception so the outer
            # handler can trigger self.retry().
            for result in resize_tasks_results:
                if result.exception:
                    raise result.exception
        # Each result is coerced to its string form -- presumably the
        # filepath of the resized image (used as such below and when
        # cleaning up); confirm against ResizeImageTask's return value.
        resize_tasks_results = [unicode(result) for result in resize_tasks_results]
        logger.info("Resize tasks done:\n%s." % (pprint.pformat(resize_tasks_results), ))
        # -----------------------------------------------------------------

        # -----------------------------------------------------------------
        # Save each of the resized images to S3.
        # Index i of resize_tasks_results corresponds to the i-th size
        # field ("image_xlarge" .. "image_small"), so order matters.
        # -----------------------------------------------------------------
        logger.info("Starting save tasks...")
        save_tasks = TaskSet(tasks=[
            SaveImageDataTask.subtask((resize_tasks_results[0], image_pk, "image_xlarge")),
            SaveImageDataTask.subtask((resize_tasks_results[1], image_pk, "image_large")),
            SaveImageDataTask.subtask((resize_tasks_results[2], image_pk, "image_medium")),
            SaveImageDataTask.subtask((resize_tasks_results[3], image_pk, "image_small")),
        ])
        save_tasks_results = save_tasks.apply()
        if not save_tasks_results.successful():
            logger.error("Error during save tasks, bail out.")
            for result in save_tasks_results:
                if result.exception:
                    raise result.exception
        #save_tasks_results = [result.get() for result in save_tasks_results]
        logger.info("Save tasks done\n%s" % (pprint.pformat(save_tasks_results), ))
        # -----------------------------------------------------------------

        # -----------------------------------------------------------------
        # Mark the Image object as being uploaded.
        # -----------------------------------------------------------------
        image = Image.objects.get(pk=image_pk)
        image.is_uploaded = True
        image.save()

        # Clean up the original temporary file and the other three new
        # temporary files created for the resized images.
        for filepath in resize_tasks_results + [image_filepath]:
            logger.info("Deleting file %s" % filepath)
            os.remove(filepath)
        # -----------------------------------------------------------------
    except Exception, exc:
        # Any failure (including re-raised subtask exceptions above)
        # re-queues this task with its original arguments.
        logger.exception("HandleImageUploadTask_%s: unhandled exception." % (task_id, ))
        self.retry(exc=exc,
                   args=(image_pk, destination_image_format, image_filepath),
                   kwargs=kwargs)