

Python sets.TaskSet Class Code Examples

This article collects typical usage examples of the Python class celery.task.sets.TaskSet. If you are wondering what exactly the TaskSet class does, how to use it, or what real TaskSet code looks like, the curated class examples below may help.


A total of 15 code examples of the TaskSet class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
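Before diving into the examples, here is a minimal sketch of the pattern that almost all of them follow: build a list of subtask signatures, wrap them in a TaskSet, dispatch the whole group with apply_async(), and join the resulting TaskSetResult to collect the return values. The add task and its module path are hypothetical placeholders, not part of any example below.

from celery.task.sets import TaskSet
from myproject.tasks import add  # hypothetical task; assume it returns the sum of its arguments

# One subtask per argument tuple; nothing runs until apply_async() is called.
job = TaskSet(tasks=[add.subtask((x, x)) for x in range(10)])
result = job.apply_async()   # returns a TaskSetResult for the whole group
totals = result.join()       # blocks until every subtask has finished

The examples differ mainly in how they build the subtask list (list comprehension, generator, accumulating in a loop) and in how they wait for results (join(), join_native(), or polling ready()).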

Example 1: hdf2

def hdf2(hdf1):
    # One hdf2geotiff subtask per HDF file, dispatched in parallel.
    taskset = TaskSet(tasks.hdf2geotiff.subtask((x, )) for x in hdf1)
    taskset_result = taskset.apply_async()

    # Block until every conversion has finished.
    results = taskset_result.join_native()
    return results
Author: Dontsov | Project: AltGeoView | Source: mod07_hdf2geotiff.py

Example 2: update_user

def update_user(user, requester):
    """ Fetch new weeks, or possibly those that failed before."""
    # TODO: fail here if couldn't contact last.fm
    # Have to fetch the chart list from last.fm because their timestamps are awkward, especially
    # those on the first few charts released.
    chart_list = fetch_chart_list(user.username, requester)
    successful_requests = Update.objects.weeks_fetched(user)

    # create taskset and run it.
    update_tasks = []
    updates = []
    with transaction.commit_on_success():
        for start, end in chart_list:
            idx = ldates.index_of_timestamp(end)
            # Skip if this week is before the user signed up
            if not idx < user.first_sunday_with_data:
                # skip if data has already been successfully fetched
                if (idx, Update.ARTIST) not in successful_requests:
                    update = Update(user=user, week_idx=idx, type=Update.ARTIST)
                    updates.append(update)
                    update_tasks.append(fetch_week_data.subtask((user, requester, start, end, Update.ARTIST)))
#                if (idx, Update.TRACK) not in successful_requests:
#                    Update.objects.create(user=user, week_idx=idx, type=Update.TRACK)
#                    update_tasks.append(fetch_week_data.subtask((user, requester, start, end, Update.TRACK)))

    Update.objects.bulk_create(updates)
    ts = TaskSet(update_tasks)
    ts.apply_async()

    user.last_updated = date.today()
    user.save()

    return len(update_tasks) > 0
Author: sjcorbett | Project: twothreefall.co.uk | Source: tasks.py

Example 3: bulk_process

def bulk_process(request):
    "Process a bulk form"
    if request.method == 'POST':
        form = BulkQuarantineProcessForm(request.POST)
        choices = request.session['quarantine_choices']
        form.fields['message_id']._choices = choices
        if form.is_valid():
            messages = Message.objects.values(
                'id', 'from_address', 'date', 'hostname', 'to_address'
            ).filter(id__in=form.cleaned_data['message_id'])
            del form.cleaned_data['message_id']
            formvals = []
            for message in messages:
                message.update(form.cleaned_data)
                message['date'] = str(message['date'])
                message['message_id'] = message['id']
                del message['id']
                formvals.append(message)
            # One subtask per message, routed to the queue of the host
            # holding the quarantined message.
            taskset = TaskSet(tasks=[
                ProcessQuarantinedMsg.subtask(
                    args=[formval],
                    options=dict(queue=formval['hostname']))
                for formval in formvals])
            task = taskset.apply_async()
            task.save()
            return HttpResponseRedirect(reverse('task-status',
                                                args=[task.taskset_id]))

    msg = _('System was unable to process your request')
    djmessages.info(request, msg)
    return HttpResponseRedirect(reverse('all-messages-index',
                                        args=['quarantine']))
Author: heartshare | Project: baruwa | Source: views.py

Example 4: bags_migrate_s3

def bags_migrate_s3(mongo_host='oulib_mongo'):
    # Catalog database
    db = MongoClient(mongo_host)
    # Celery worker storage connections
    celery_worker_hostname = os.getenv('celery_worker_hostname', "dev-mstacy")
    celery_config = db.catalog.celery_worker_config.find_one(
        {"celery_worker": celery_worker_hostname})
    # Storage locations configured for this celery worker
    norfile_bagit = celery_config['norfile']['bagit']
    s3_bucket = celery_config['s3']['bucket']
    subtasks = []
    check_catalog = []
    s3 = boto3.client('s3')
    for itm in db.catalog.bagit_inventory.find({"s3.exists": False}):
        # Double-check that the bag is not already in S3
        s3_key = s3.list_objects(Bucket=s3_bucket, Prefix=itm['bag'], MaxKeys=1)
        if 'Contents' not in s3_key:
            subtasks.append(upload_bag_s3.subtask(args=(itm['bag'], norfile_bagit)))
        else:
            check_catalog.append(itm['bag'])
    if subtasks:
        job = TaskSet(tasks=subtasks)
        result_set = job.apply_async()

    check = ",".join(check_catalog)
    return "{0} subtasks('upload_bag_s3') submitted. Check Catalog: {1}".format(
        len(subtasks), check)
Author: mbstacy | Project: oulibq | Source: tasks.py

Example 5: map_reduce

def map_reduce(task, task_args, agg, acc):
    """
    Given a task and an iterable of positional arguments, apply the
    task function to the arguments in parallel and return an aggregate
    result depending on the initial value of the accumulator
    and on the aggregation function. To save memory, the order is
    not preserved and there is no list with the intermediate results:
    the accumulator is incremented as soon as a task result comes.

    NB: if the environment variable OQ_NO_DISTRIBUTE is set the
    tasks are run sequentially in the current process and then
    map_reduce(task, task_args, agg, acc) is the same as
    reduce(agg, itertools.starmap(task, task_args), acc).
    Users of map_reduce should be aware of the fact that when
    thousands of tasks are spawned and large arguments are passed
    or large results are returned they may run into memory issues:
    this is why the calculators limit the queue with the
    `concurrent_task` concept.

    :param task: a `celery` task callable.
    :param task_args: an iterable over positional arguments
    :param agg: the aggregation function, (acc, val) -> new acc
    :param acc: the initial value of the accumulator
    :returns: the final value of the accumulator
    """
    if no_distribute():
        for the_args in task_args:
            result, exctype = safely_call(task.task_func, the_args)
            if exctype:
                raise RuntimeError(result)
            acc = agg(acc, result)
    else:
        backend = current_app().backend
        unpik = 0
        job_id = task_args[0][0]
        taskname = task.__name__
        mon = LightMonitor("unpickling %s" % taskname, job_id, task)
        to_send = 0
        pickled_args = []
        for args in task_args:
            piks = pickle_sequence(args)
            pickled_args.append(piks)
            to_send += sum(len(p) for p in piks)
        logs.LOG.info("Sending %dM", to_send / ONE_MB)
        taskset = TaskSet(tasks=map(task.subtask, pickled_args))
        for task_id, result_dict in taskset.apply_async().iter_native():
            check_mem_usage()  # log a warning if too much memory is used
            result_pik = result_dict["result"]
            with mon:
                result, exctype = result_pik.unpickle()
            if exctype:
                raise RuntimeError(result)
            unpik += len(result_pik)
            acc = agg(acc, result)
            del backend._cache[task_id]  # work around a celery bug
        logs.LOG.info("Unpickled %dM of received data in %s seconds", unpik / ONE_MB, mon.duration)
    return acc
Author: vup1120 | Project: oq-engine | Source: tasks.py

Example 6: update_documents

    def update_documents(self, documents, count):
        sys.stdout.write('Graph size is {0:d} nodes.\n'.format(count))
        sys.stdout.flush()
        processed_count = 0
        subtasks = []
        timings = []
        average_per_s = 0
        if self.index == 'concurrently':
            index_during_subtask = True
        else:
            index_during_subtask = False
        for doc in documents:
            processed_count += 1
            if processed_count % 10000 == 0:
                # Send the commit every 10000 times.
                self.si.commit()
            subtasks.append(update_document.subtask((doc, index_during_subtask)))
            if processed_count % 1000 == 1:
                t1 = time.time()
            if processed_count % 1000 == 0:
                t2 = time.time()
                timings.append(t2 - t1)
                average_per_s = 1000 / (sum(timings) / float(len(timings)))
            sys.stdout.write("\rProcessing items in Celery queue: {:.0%} ({}/{}, {:.1f}/s, Last id: {})".format(
                processed_count * 1.0 / count,
                processed_count,
                count,
                average_per_s,
                doc.pk,
            ))
            sys.stdout.flush()
            last_document = (count == processed_count)
            if (processed_count % 50 == 0) or last_document:
                # Every 50 documents, send the accumulated subtasks off for
                # processing and poll until they are done.
                job = TaskSet(tasks=subtasks)
                result = job.apply_async()
                while not result.ready():
                    time.sleep(1)

                # The jobs finished - clean things up for the next round
                subtasks = []

        if self.index == 'all_at_end':
            call_command(
                'cl_update_index',
                '--type', 'opinions',
                '--solr-url', settings.SOLR_OPINION_URL,
                '--noinput',
                '--update',
                '--everything',
                '--do-commit',
            )
        elif self.index == 'false':
            sys.stdout.write("Solr index not updated after running citation "
                             "finder. You may want to do so manually.")
Author: Andr3iC | Project: courtlistener | Source: cl_find_citations.py

Example 7: dispatch_image

def dispatch_image(**params):
    # Queue five identical anaglyph subtasks and run them as one TaskSet.
    the_subtasks = [McAllisterAnaglyphTask.subtask(**params) for _ in range(5)]
    job = TaskSet(tasks=the_subtasks)
    result = job.apply_async()
    return result
Author: mbrown1413 | Project: anaglyph | Source: utils.py

Example 8: fetch_feeds

def fetch_feeds(callback=None):
    logging.warn("updating all feeds")
    feeds = Feed.objects.all()
    task_list = []
    for feed in feeds:
        s = fetch_feed.subtask([feed.id])
        task_list.append(s)
    fetch_all_tasks = TaskSet(tasks=task_list)
    fetch_all_tasks.apply_async()
    return None
Author: ejesse | Project: django-reader | Source: tasks.py

Example 9: SiteMaintenance

def SiteMaintenance():
    session = DBSession()
    categories = session.query(Category)

    subtasks = [UpdateFromFeed.subtask((cat.feedurl, cat.idcategory)) for cat in categories]
    subtasks.append(UpdateDlpStats.subtask())

    maintenanceJobs = TaskSet(tasks=subtasks)
    maintenanceJobs.apply_async().join()
    UpdateIndex.delay()
Author: moatra | Project: fffn1 | Source: sitemaintenance.py

Example 10: test_apply

    def test_apply(self):

        applied = [0]

        class mocksubtask(subtask):
            def apply(self, *args, **kwargs):
                applied[0] += 1

        ts = TaskSet([mocksubtask(MockTask, (i, i)) for i in (2, 4, 8)])
        ts.apply()
        self.assertEqual(applied[0], 3)
Author: GVRGowtham | Project: mozillians | Source: test_task_sets.py

Example 11: _run

def _run(fn_name, xs):
    fn = getattr(tasks, fn_name)
    job = TaskSet(tasks=[fn.subtask((x,)) for x in xs])
    result = job.apply_async()
    while not result.ready():
        time.sleep(5)
    out = []
    for x in result.join():
        if x:
            out.extend(x)
    return out
Author: edajeda | Project: bcbb | Source: messaging.py

Example 12: make_pi_tasks

def make_pi_tasks():

    taskset = TaskSet(tasks.make_pi.subtask((x, )) for x in NUM_CALCS)
    print "Dispatching tasks"
    taskset_result = taskset.apply_async()

    print "Waiting for results"
    results = taskset_result.join_native()
    print "Results:"
    for i in results:
        print i
Author: Saravanan-Kalirajan | Project: rentalcar | Source: demo.py

Example 13: _exec_callbacks

def _exec_callbacks(callback):
    """ Exec the callback or list of callbacks. Return asyncronous results as
    the TaskSetResult object.
    """
    async_result = None
    if callback:
        if not isinstance(callback, (list, tuple)): # not iterable
            callback = [callback,]
        taskset = TaskSet(tasks=callback)
        async_result = taskset.apply_async()
    return async_result
Author: ankurchopra87 | Project: celery-tasktree | Source: celery_tasktree.py

Example 14: run

def run(self, set, body, interval=1, max_retries=None, **kwargs):
    if not isinstance(set, TaskSet):
        set = TaskSet(set)
    r = []
    setid = gen_unique_id()
    for task in set.tasks:
        uuid = gen_unique_id()
        task.options.update(task_id=uuid, chord=body)
        r.append(current_app.AsyncResult(uuid))
    current_app.TaskSetResult(setid, r).save()
    self.backend.on_chord_apply(setid, body, interval, max_retries)
    return set.apply_async(taskset_id=setid)
Author: eldondev | Project: celery | Source: chord.py

Example 15: proccesing_pictrures

def proccesing_pictrures(shop_id, **kwargs):
    """
    Task that downloads and processes the pictures of a shop's offers.
    """
    picture_urls = {}
    tasks_image = []
    for pict_url in db.Offers.find(spec={'shopId': shop_id}, fields=['id', 'picture'], slave_ok=True):
        picture_urls[pict_url['id']] = pict_url['picture']
        # Queue one download subtask per offer picture (append, do not overwrite the list).
        tasks_image.append(download_image.subtask(url=pict_url['picture'], id=pict_url['id']))
    job = TaskSet(tasks=tasks_image)
    result = job.apply_async()
    # Block until every download subtask has completed.
    result.join()
Author: testTemtProj | Project: OLD_PROJECT | Source: task_image.py


Note: The celery.task.sets.TaskSet class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with its original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.