

Python celery.group Method Code Examples

This article collects typical usage examples of the celery.group method in Python. If you are wondering exactly what celery.group does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the celery package itself.


The sections below show 15 code examples of the celery.group method, sorted by popularity by default.
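Before diving in, note the common pattern behind most of the examples below: build a group of task signatures, submit the group, and wait for the collected results. The following minimal sketch illustrates that pattern; the add task and the Redis broker/backend URLs are illustrative assumptions, not taken from any example on this page.

# Minimal sketch of the celery.group pattern (broker/backend URLs and the add task are assumptions)
from celery import Celery, group

app = Celery('demo',
             broker='redis://localhost:6379/0',
             backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

def run_demo():
    # one signature per input pair; group() accepts any iterable of signatures
    job = group(add.s(i, i) for i in range(10))
    # submit all subtasks at once; this returns a GroupResult
    result = job.apply_async()
    # block until every subtask finishes; results come back in submission order
    return result.get()   # [0, 2, 4, ..., 18]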

Example 1: extract_functionnames_regex

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def extract_functionnames_regex(file_path, exported_only=False):
    rproc = "(^|;|};?|public:|private:)"  # group 1: beginning of source line
    rproc += "\s*"  # optional whitespace(s)
    rproc += "([\w_][\w\s\d_,<>\[\].&:\*]*)"  # group 2: return type (includes associated class)
    rproc += "\s+"  # mandatory whitespace(s)
    rproc += "(\*|[\*|\w_][\w\d_<>\*\[\]&]*::)?"  # group 3: # group 3: optional class/pointer type
    rproc += "([\w_][\w\d_]*)"  # group 4: function name
    rproc += "\s*"  # optional whitespace(s)
    rproc += "\("  # '(' start of parameters
    rproc += "([\w\s,<>\[\].=&':/*]*)"  # group 4: parameters
    rproc += "\)"  # ')' end of parameters
    rproc += "\s*"  # optional whitespace(s)
    rproc += "([\w\s\d_]*)"  # group 5: optional attribute
    rproc += "\s*"  # optional whitespace(s)
    rproc += "{"  # '{' function start

    p = re.compile(rproc)
    exclude = ['if', 'while', 'do', 'for', 'switch']
    for x in p.finditer(loadtxt(file_path)):
        if x.group(4) in exclude or (exported_only and 'static' in x.group(2)):
            continue
        yield x.group(4) 
Developer: osssanitizer, Project: osspolice, Lines: 24, Source: signature.py

Example 2: getDeep

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def getDeep():
	result = {}
	for i in range(deep):
		if result:
			# IDs not yet present as keys in result; also skip id: None
			lst = list(set([item for sublist in result.values() if sublist for item in sublist]) - set(result.keys()))
			d_friends = group(deep_friends.s(i) for i in parts(list(lst), 75))().get()
			# merge the newly fetched level into the accumulated result
			result.update({k: v for d in d_friends for k, v in d.items()})
		else:
			all_friends = friends(my_id)
			d_friends = group(deep_friends.s(i) for i in parts(list(all_friends[0].keys()), 75) )().get()
			result = {k: v for d in d_friends for k, v in d.items()}

	return cleaner(result) 
Developer: stleon, Project: vk_friends, Lines: 18, Source: call.py

Example 3: compute_threshold

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def compute_threshold(self, thresh):
        # split task into 1024 chunks (too many subtasks causes a backlog
        # error)
        chunksize = max(len(self.decomp_ids) // 1024, 1)
        print('Queuing %s items (chunksize %s) for threshold %s...' % (
            len(self.decomp_ids), chunksize, thresh))

        job = group([
            evaluate_decompositions_task.subtask(kwargs={
                'decomposition_ids': ids,
                'delete_failed_open': False,
                'thresh': thresh,
            })
            for ids in chunk_list_generator(self.decomp_ids, chunksize)
        ])
        result = job.apply_async()

        print('Waiting on %s subtasks with chunksize %s...' % (
            len(self.decomp_ids) // chunksize, chunksize))

        result.join() 
Developer: seanbell, Project: opensurfaces, Lines: 23, Source: intrinsic_results_sweep_thresh.py
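Both this example and Example 15 below pass their ID chunks through a helper named chunk_list_generator, which is not shown on this page. Purely as an illustrative stand-in (not the project's actual implementation), a helper with the behavior implied here could look like:

def chunk_list_generator(items, chunksize):
    # hypothetical reconstruction: yield consecutive slices of at most chunksize elements
    chunksize = max(int(chunksize), 1)
    for start in range(0, len(items), chunksize):
        yield items[start:start + chunksize]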

Example 4: _get_inference_job_signature

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def _get_inference_job_signature(self, imageIDs, maxNumWorkers=-1):
        '''
            Assembles (but does not submit) an inference job based on the provided parameters.
        '''
        # setup
        if maxNumWorkers != 1:
            # only query the number of available workers if more than one is specified to save time
            num_available = self._get_num_available_workers()
            if maxNumWorkers == -1:
                maxNumWorkers = num_available   #TODO: more than one process per worker?
            else:
                maxNumWorkers = min(maxNumWorkers, num_available)

        # distribute across workers
        images_subset = array_split(imageIDs, max(1, len(imageIDs) // maxNumWorkers))
        jobs = []
        for subset in images_subset:
            job = celery_interface.call_inference.si(imageIDs=subset)
            jobs.append(job)

        jobGroup = group(jobs)
        return jobGroup 
Developer: microsoft, Project: aerial_wildlife_detection, Lines: 24, Source: middleware.py

Example 5: run

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def run(self, query, options=None):
        """
        https://celery.readthedocs.io/en/latest/userguide/canvas.html#groups
        """

        logger.info('Starting download with query=%s, options=%s', query, options)

        # http://docs.celeryproject.org/en/latest/userguide/calling.html

        if isinstance(query, str):
            query = {'number': query}

        if isinstance(query, dict):
            self.task = self.task_function.delay(query, options)

        elif isinstance(query, list):
            tasks = [self.task_function.s(query, options) for query in query]
            task_group = celery.group(tasks)
            self.task = task_group.delay()

        else:
            raise TypeError('Unknown type for query {}. type={}'.format(query, type(query)))

        return self.task 
Developer: ip-tools, Project: uspto-opendata-python, Lines: 26, Source: tasks.py
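Whichever branch runs, the run method above returns an object supporting the standard Celery result API, so a caller can wait on it the same way for a single task and for a group. A hedged usage sketch follows; the downloader instance and the query strings are placeholders, not taken from the project.

# usage sketch; `downloader` is assumed to be an instance of the class shown above
task = downloader.run(['DOC-0000001', 'DOC-0000002'], options=None)

# .get() blocks until completion and works both for a single AsyncResult
# and for the GroupResult produced by the list branch
payload = task.get()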

Example 6: mandelbrot_main

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def mandelbrot_main(w, h, max_iterations=1000, output='mandelbrot_celery.png'):
    """ Main function for mandelbrot program with celery """
    
    job = group([mandelbrot_calc_row.s(y, w, h, max_iterations) for y in range(h)])
    result = job.apply_async()

    image = Image.new('RGB', (w, h))
    
    for image_rows in result.join():
        for k,v in image_rows.items():
            k = int(k)
            v = tuple(map(int, v))
            x,y = k % args.width, k // args.width
            image.putpixel((x,y), v)
            
    image.save(output, 'PNG')
    print('Saved to',output) 
Developer: PacktPublishing, Project: Software-Architecture-with-Python, Lines: 19, Source: celery_mandelbrot.py

Example 7: authorize_exam_runs

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def authorize_exam_runs():
    """
    Check for outstanding exam runs
    """
    for exam_run in ExamRun.objects.filter(
            authorized=False,
            date_first_schedulable__lte=now_in_utc(),
    ):
        enrollment_ids_qset = ProgramEnrollment.objects.filter(
            program=exam_run.course.program).values_list('id', flat=True)
        # create a group of subtasks
        job = group(
            authorize_enrollment_for_exam_run.s(enrollment_ids, exam_run.id)
            for enrollment_ids in chunks(enrollment_ids_qset)
        )
        job.apply_async()
        exam_run.authorized = True
        exam_run.save() 
Developer: mitodl, Project: micromasters, Lines: 20, Source: tasks.py

Example 8: build_report_task

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def build_report_task(results, ref_date):
    all_repos = []
    for repos in results:
        all_repos += [Repository(repo) for repo in repos]

    # 3. group by language
    grouped_repos = {}
    for repo in all_repos:
        if repo.language in grouped_repos:
            grouped_repos[repo.language].append(repo.name)
        else:
            grouped_repos[repo.language] = [repo.name]

    # 4. create csv
    lines = []
    for lang in sorted(grouped_repos.keys()):
        lines.append([lang] + grouped_repos[lang])

    filename = '{media}/github-hot-repos-{date}.csv'.format(media=settings.MEDIA_ROOT, date=ref_date)
    return make_csv(filename, lines) 
Developer: Rustem, Project: toptal-blog-celery-toy-ex, Lines: 22, Source: tasks.py

Example 9: extract_strings_regex

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def extract_strings_regex(file_path):
    """
    Function to extract strings from a C source file.
    """
    # get all strings in this repo
    p = re.compile(
        r'(?P<prefix>(?:\bu8|\b[LuU])?)(?:"(?P<dbl>[^"\\]*(?:\\.[^"\\]*)*)"|\'(?P<sngl>[^\'\\]*(?:\\.[^\'\\]*)*)\')|R"([^"(]*)\((?P<raw>.*?)\)\4"')

    with open(file_path) as ins:
        for line in ins:
            line = line.rstrip('\n')

            # filter out "include *.c|cpp|cc|h"
            if re.search(r'\s*#\s*include\s*(?:<([^>]*)>|"([^"]*)")', line):
                continue

            # filter comments
            if re.search('//.*?\n|/\*.*?\*/|^\s*\*\s.*$', line):
                continue

            # iterate over them
            for x in p.finditer(line):
                if x.group("dbl"):
                    line = x.group("dbl")
                elif x.group("sngl"):
                    continue
                else:
                    line = x.group("raw")
                yield line 
Developer: osssanitizer, Project: osspolice, Lines: 31, Source: signature.py

Example 10: replace_scope_in_params

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def replace_scope_in_params(s):
    if s.group() == "std::":
        return s.group()
    else:
        return ''


###########################################################
# Lookup library
########################################################### 
Developer: osssanitizer, Project: osspolice, Lines: 12, Source: searching.py

Example 11: assign_task

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def assign_task(end):
    from celery import group
    from remote import gen_prime  # This has to be imported
    job = group(gen_prime.s(2 ** i) for i in range(end))
    result = job.apply_async()
    result.get() 
Developer: osssanitizer, Project: osspolice, Lines: 8, Source: remote.py

Example 12: getMutual

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def getMutual():
	all_friends = friends(my_id)
	c_friends = group(mutual_friends.s(i) for i in parts(list(all_friends[0].keys()), 75))().get()
	result = {k: v for d in c_friends for k, v in d.items()}
	return cleaner(result) 
Developer: stleon, Project: vk_friends, Lines: 7, Source: call.py

Example 13: _gen_jobs

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def _gen_jobs(self, topic):
        jobs = []
        for job in self.subscribed:
            if job[1].match(topic):
                jobs.append(job[2].s())
        self.jobs[topic] = celery.group(jobs) 
Developer: Mulugruntz, Project: celery-pubsub, Lines: 8, Source: pubsub.py

Example 14: run

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def run(self, *args, **kwargs):
        with session_manager() as db_session:
            domain_list = crud_domain.get_domain_list(db_session)
            load_domain_extra_data_task = group([
                LoadDomainExtraDataTask().s(domain.id)
                for domain in domain_list
            ])
            load_result = load_domain_extra_data_task.delay()
            with allow_join_result():
                load_result.join()

            return self.set_result() 
Developer: QAX-A-Team, Project: LuWu, Lines: 14, Source: domain.py

Example 15: handle

# Module required: import celery [as alias]
# Or: from celery import group [as alias]
def handle(self, *args, **options):
        thresh = 0.10

        qset = IntrinsicImagesDecomposition.objects.all() \
            .exclude(error_comparison_thresh=thresh) \
            .values_list('id', flat=True)

        delete_failed_open = False
        if len(args) >= 1 and args[0] == "delete-failed-open":
            delete_failed_open = True

        print('delete_failed_open: %s' % delete_failed_open)

        qset = list(qset)
        random.shuffle(qset)

        chunksize = max(len(qset) // 1024, 1)
        print('Queuing tasks in chunks of %s items...' % chunksize)
        job = group([
            evaluate_decompositions_task.subtask(kwargs={
                'decomposition_ids': ids,
                'delete_failed_open': delete_failed_open,
                'thresh': thresh,
            })
            for ids in chunk_list_generator(qset, chunksize)
        ])
        job.apply_async()

        print('Done')
Developer: seanbell, Project: opensurfaces, Lines: 31, Source: intrinsic_evaluate_all.py


Note: the celery.group examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not republish without permission.