This article collects typical usage examples of the Python celery.chain method. If you are wondering what celery.chain does, how to use it, or where to find working examples of it, the curated code samples below may help. You can also explore other usage examples from the celery module.
The following shows 15 code examples of celery.chain, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
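Before the examples, here is a minimal, self-contained sketch of what celery.chain does: it links task signatures so they run one after another, passing each task's return value to the next unless immutable signatures (.si()) are used. The app name, broker URL, and tasks below are placeholders rather than code from any of the examples:

from celery import Celery, chain

app = Celery('demo', broker='redis://localhost:6379/0')  # placeholder broker URL

@app.task
def add(x, y):
    return x + y

@app.task
def report(total):
    return "total=%s" % total

# add(2, 3) runs first; its return value (5) is passed as the first argument to report().
result = chain(add.s(2, 3), report.s()).apply_async()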
Example 1: import_task
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def import_task():
"""
Enter all of the daily import tasks into the queue, where they can grind away from there.
The import is broken up into tasks for a few reasons: it can be paused by stopping the sims queue
if necessary, and it works around the Celery task time limit.
"""
if not settings.DO_IMPORTING_HERE:
return
tasks = [
daily_cleanup.si(),
fix_unknown_emplids.si(),
get_role_people.si(),
import_grads.si(),
get_update_grads_task(),
import_offerings.si(continue_import=True),
import_semester_info.si(),
import_active_grad_gpas.si(),
#get_import_offerings_task(),
#import_combined_sections.si(),
#send_report.si()
]
chain(*tasks).apply_async()
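Every signature in this chain is built with .si() rather than .s(): an immutable signature ignores the result of the preceding task, so the chain above is used purely to enforce ordering. The general difference, with placeholder tasks a and b:

# chain(a.s(2), b.s())    -> b receives a's return value as its first argument
# chain(a.si(2), b.si(3)) -> a then b run in order, but no result is passed between them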
Example 2: get_import_offerings_tasks
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def get_import_offerings_tasks():
"""
Get all of the offerings to import, and build tasks (in groups) to do the work.
Doesn't actually call the jobs: just returns celery tasks to be called.
"""
#offerings = importer.import_offerings(extra_where="ct.subject='CMPT' and ct.catalog_nbr IN (' 383', ' 470')")
#offerings = importer.import_offerings()
offerings = importer.import_offerings(cancel_missing=True)
offerings = list(offerings)
offerings.sort()
offering_groups = _grouper(offerings, 10)
slug_groups = ([o.slug for o in offerings] for offerings in offering_groups)
#tasks = [import_offering_group.si(slugs) for slugs in slug_groups]
#return tasks
offering_import_chain = chain(*[import_offering_group.si(slugs) for slugs in slug_groups])
return offering_import_chain
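_grouper is a helper that is not shown in this excerpt. Judging from how it is called (an iterable plus a batch size), a plausible implementation is the standard itertools batching recipe; this sketch is an assumption, not the project's actual code:

import itertools

def _grouper(iterable, n):
    # Yield successive lists of up to n items from the iterable (assumed helper).
    it = iter(iterable)
    while True:
        batch = list(itertools.islice(it, n))
        if not batch:
            return
        yield batch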
Example 3: send_commands_to_celery
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def send_commands_to_celery(populated_command_tuple, output_base_dir, simulation):
celery_path = sys.path[0]
cmd_name, populated_command, vhost, outfile, workspace, task_id, scanned_service_port, scanned_service_name, scanned_service_protocol = populated_command_tuple
host_dir = output_base_dir + vhost
host_data_dir = host_dir + "/celerystalkOutput/"
utils.create_task(cmd_name, populated_command, vhost, outfile, workspace, task_id)
result = chain(
# Insert a row into the database to mark the task as submitted. A subtask does not get tracked
# in Celery the same way a task does; for instance, you can't find it in Flower.
# tasks.cel_create_task.subtask(args=(cmd_name, populated_command, ip, outfile + ".txt", workspace, task_id)),
# Run the command. run_cmd takes care of marking the task as started and then completed.
# The .si() tells run_cmd to ignore the data returned from a previous task.
tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),
)() # .apply_async()
#task_id_list.append(result.task_id)
host_audit_log = host_dir + "/" + "{0}_executed_commands.txt".format(vhost)
with open(host_audit_log, 'a') as f:
    f.write(populated_command + "\n\n")
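Since the chain wraps a single immutable signature, it behaves the same as dispatching run_cmd directly; the chain form presumably remains so that extra subtasks (such as the commented-out cel_create_task) can be re-enabled later. A roughly equivalent single-task dispatch would be:

result = tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).apply_async(task_id=task_id)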
Example 4: _get_index_analyzers
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def _get_index_analyzers():
"""Get list of index analysis tasks to run.
Returns:
Celery chain of index analysis tasks as Celery subtask signatures or
None if index analyzers are disabled in config.
"""
tasks = []
index_analyzers = current_app.config.get('AUTO_INDEX_ANALYZERS')
if not index_analyzers:
return None
for analyzer_name, _ in manager.AnalysisManager.get_analyzers(
index_analyzers):
tasks.append(run_index_analyzer.s(analyzer_name))
return chain(tasks)
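Two details are worth noting: chain() also accepts a single iterable of signatures, and because the analyzers are partial signatures built with .s() rather than .si(), each analyzer's return value is passed along to the next task in the chain. A caller might use the returned value like this (a sketch; the calling code is not part of this excerpt):

index_analyzer_chain = _get_index_analyzers()
if index_analyzer_chain is not None:
    index_analyzer_chain.apply_async()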
Example 5: run
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def run(self):
cause_options = self.cause_agent.options or {}
cause_options.update({k: v for k, v in self.cause_options.items()
if not k.startswith('_')})
cause_options['task_pair_id'] = self.id
effect_options = self.effect_agent.options or {}
effect_options.update({k: v for k, v in self.effect_options.items()
if not k.startswith('_')})
effect_options['task_pair_id'] = self.id
cause = self.cause.s(**cause_options)
if getattr(self.cause, 'dedup_key', None) is not None:
effect = dedup_effect_wrapper.s(
dedup_key=self.cause.dedup_key,
task_pair_id=self.id,
effect=self.effect.s(**effect_options),
)
else:
effect = self.effect.s(**effect_options)
return chain(cause, dmap.s(effect))()
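dmap is not defined in this excerpt, but the shape of the call, chain(cause, dmap.s(effect)), matches a well-known Celery recipe: the cause task returns a list, and dmap replaces itself with a group that clones the effect signature once per item. A common implementation of that recipe looks like the sketch below (an assumption, not necessarily this project's version; app is a placeholder Celery instance):

from celery import subtask, group

@app.task
def dmap(it, callback):
    # Clone the callback signature for each item returned by the previous task,
    # then launch the clones in parallel as a group.
    callback = subtask(callback)
    return group(callback.clone([arg]) for arg in it)()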
Example 6: get_update_grads_task
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def get_update_grads_task():
"""
Get grad students to import, and build tasks (in groups) to do the work.
Doesn't actually call the jobs: just returns a celery task to be called.
"""
active = GradStudent.objects.filter(current_status__in=STATUS_ACTIVE).select_related('person')
applicants = GradStudent.objects.filter(
    current_status__in=STATUS_APPLICANT,
    updated_at__gt=datetime.datetime.now() - datetime.timedelta(days=7),
).select_related('person')
grads = itertools.chain(active, applicants)
emplids = set(gs.person.emplid for gs in grads)
emplid_groups = _grouper(emplids, 20)
grad_import_chain = chain(*[import_grad_group.si(list(emplids)) for emplids in emplid_groups])
return grad_import_chain
Example 7: aquatone_host
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def aquatone_host(urls_to_screenshot, vhost, workspace, simulation, scan_output_base_file_dir, celery_path, config_file=None):
print("in aquatone host")
celery_path = lib.db.get_current_install_path()[0][0]
config, supported_services = config_parser.read_config_ini(config_file)
for (cmd_name, cmd) in config.items("screenshots"):
#print(cmd_name, cmd)
try:
if cmd_name == "aquatone":
outfile = scan_output_base_file_dir + "_" + cmd_name
filename = "/tmp/" + workspace + "_paths_" + vhost + ".txt"
paths = lib.db.get_all_paths_for_host_path_only(vhost,workspace)
print(str(paths))
with open(filename, 'w') as paths_tmp_file:
#paths_tmp_file.write(str(paths))
for line in paths:
#print(str(line))
paths_tmp_file.write(str(line[0]) + "\n")
populated_command = cmd.replace("[FILE]", filename).replace("[OUTPUT]", outfile)
populated_command = replace_user_config_options(config_file, populated_command)
#print(populated_command)
except Exception as e:
    print(e)
    print("[!] Error: In the config file, there needs to be one (and only one) enabled aquatone command.")
    exit()
task_id = uuid()
utils.create_task(cmd_name, populated_command, vhost, outfile + "/aquatone_report.html", workspace, task_id)
result = chain(
tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id).set(task_id=task_id),
)()
Example 8: create_dns_recon_tasks
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def create_dns_recon_tasks(domains, simulation, workspace, output_base_dir, out_of_scope_hosts=None, config_file=None):
workspace_mode = lib.db.get_workspace_mode(workspace)[0][0]
task_id_list = []
total_tasks_num = 0
config, supported_services = config_parser.read_config_ini(config_file)
celery_path = sys.path[0]
for domain in domains.split(","):
for section in config.sections():
if section == "domain-recon":
for (cmd_name, cmd) in config.items(section):
outfile = output_base_dir + domain + "_" + cmd_name
populated_command = cmd.replace("[DOMAIN]", domain).replace("[OUTPUT]", outfile)
populated_command = replace_user_config_options(config_file, populated_command)
if simulation:
populated_command = "#" + populated_command
#print(populated_command)
# Grab a UUID from celery.utils so that it can be assigned to the task at creation time,
# which allows the same task_id to be passed to all of the tasks in the chain.
task_id = uuid()
utils.create_task(cmd_name, populated_command, domain, outfile + ".txt", workspace, task_id)
process_domain_tuple = (cmd_name, populated_command, output_base_dir, workspace, domain, simulation, celery_path, workspace_mode)
result = chain(
# Insert a row into the database to mark the task as submitted. A subtask does not get tracked
# in Celery the same way a task does; for instance, you can't find it in Flower.
#tasks.cel_create_task.subtask(args=(cmd_name, populated_command, domain, "", workspace, task_id)),
# Run the command. run_cmd takes care of marking the task as started and then completed.
# The .si() tells run_cmd to ignore the data returned from a previous task.
tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id, process_domain_tuple=process_domain_tuple).set(task_id=task_id),
)() # .apply_async()
task_id_list.append(result.task_id)
total_tasks_num = len(task_id_list)
print("\n\n[+] Summary:\tSubmitted {0} tasks to the [{1}] workspace.".format(total_tasks_num, workspace))
print("[+]\t\tThere might be additional tasks added to the queue during post processing\n[+]")
print("[+]\t\tTo keep an eye on things, run one of these commands: \n[+]")
print("[+]\t\tcelerystalk query [watch]")
print("[+]\t\tcelerystalk query brief [watch]")
print("[+]\t\tcelerystalk query summary [watch]\n")
Example 9: nmap_scan_subdomain_host
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def nmap_scan_subdomain_host(vhost, workspace, simulation, output_base_dir, config_file=None):
celery_path = sys.path[0]
config_nmap_options = config_parser.extract_bb_nmap_options(config_file=config_file)
config = ConfigParser(allow_no_value=True)
config.read(['config.ini'])
vhost_explicitly_out_of_scope = lib.db.is_vhost_explicitly_out_of_scope(vhost, workspace)
output_host_dir = os.path.normpath(os.path.join(output_base_dir, vhost))
if not os.path.isdir(output_host_dir):
    os.makedirs(output_host_dir)
output_file = os.path.normpath(os.path.join(output_host_dir, vhost + "_nmap_tcp_scan"))
if not vhost_explicitly_out_of_scope:
#print(config_nmap_options)
cmd_name = "nmap_tcp_scan"
try:
if not simulation:
populated_command = "nmap " + vhost + config_nmap_options + " -oA " + output_file
else:
populated_command = "#nmap " + vhost + config_nmap_options + " -oA " + output_file
except TypeError:
print("[!] Error: In the config file, there needs to be one, and only one, enabled tcp_scan command in the nmap_commands section.")
print("[!] This determines what ports to scan.")
exit()
task_id = uuid()
utils.create_task(cmd_name, populated_command, vhost, output_file, workspace, task_id)
result = chain(
tasks.run_cmd.si(cmd_name, populated_command, celery_path, task_id, output_file=output_file, process_nmap=True).set(task_id=task_id),
)()
Example 10: full_pipeline
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def full_pipeline(dataset_id, project_id):
'''
Get properties and then get viz specs
'''
pipeline = chain([
ingestion_pipeline(dataset_id, project_id),
viz_spec_pipeline(dataset_id, project_id, [])
])
return pipeline
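The two sub-pipelines here are presumably signatures or chains themselves; chain nests cleanly, so the ingestion stage runs to completion before the viz-spec stage starts. Launching the composed pipeline would look like this sketch:

full_pipeline(dataset_id, project_id).apply_async()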
Example 11: test_b2c_tasks
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def test_b2c_tasks(self, mock_post, mock_get):
self.assertTrue(chain(send_b2c_request_task.s(100, 254708374149, 1),
process_b2c_call_response_task.s(1)).apply_async())
Example 12: test_online_tasks
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def test_online_tasks(self, mock_post, mock_get):
self.assertTrue(
chain(call_online_checkout_task.s(254708374149, 100, '', ''),
handle_online_checkout_response_task.s(1)).apply_async())
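In both tests the assertion passes because apply_async() on a chain returns an AsyncResult (for the final task in the chain), which is always truthy; it does not prove the tasks succeeded. Waiting for the end-to-end result would look like this sketch, using the B2C test as an example:

result = chain(send_b2c_request_task.s(100, 254708374149, 1),
               process_b2c_call_response_task.s(1)).apply_async()
final = result.get(timeout=10)  # blocks until process_b2c_call_response_task finishes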
Example 13: loads
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def loads(payload):
if payload.get('type') != 'normal':
    raise Exception('celery task loader only supports normal mode')
tasks = payload.get('tasks', [])
cts = []
for task in tasks:
    ops = [load(id, task.get('args'), task.get('on_error')) if i == 0
           else load(id, None, task.get('on_error'))
           for i, id in enumerate(task['ids'])]
    cts.append(chain(ops))
callback = payload.get('callback')
if callback:
return chord(header=group(cts), body=func.load(callback).s())
return group(cts)
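The chord(header=..., body=...) form runs every chain in the header group in parallel, then calls the body task once with the list of all their results. A minimal standalone illustration, where add and total are placeholder tasks:

from celery import chord, group

# total() is invoked once with [3, 7] after both add() calls complete.
chord(header=group(add.s(1, 2), add.s(3, 4)), body=total.s()).apply_async()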
Example 14: barrier
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def barrier(self, data, merging=False, sequential=False, replace=None, root_docs=None):
replacement = []
if isinstance(data[0], basestring):
data = [data]
# XXX: ordering might be incorrect because of suffixes. fix by replacing
# tuples with class
data = sorted(data, key=lambda x: x[1])
# merge output from same source documents
if merging == 'doc':
for docs, task in izip(cycle(_group_by_prefix(data, root_docs)), replace):
if sequential:
task[0]['args'] = [docs]
replacement.append(chain(signature(t) for t in task))
else:
task['args'] = [docs]
replacement.append(signature(task))
# merge everything
elif merging:
for task in replace:
if sequential:
task[0]['args'] = [data]
replacement.append(chain(signature(t) for t in task))
else:
task['args'] = [data]
replacement.append(signature(task))
else:
for ret_val, task in izip(cycle(data), replace):
if sequential:
task[0]['args'] = [ret_val]
replacement.append(chain(signature(t) for t in task))
else:
task['args'] = [ret_val]
replacement.append(signature(task))
raise self.replace(group(replacement))
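raise self.replace(...) is how a bound Celery task swaps itself out mid-flight: the group built above executes in this task's place, and its results are delivered to whatever was chained after the original task. The idiom in isolation, where app and process are placeholders:

from celery import group

@app.task(bind=True)
def fan_out(self, items):
    # Replace this task with a group; one process() clone runs per item, and the
    # group's results flow to any tasks chained after fan_out.
    raise self.replace(group(process.s(item) for item in items))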
Example 15: zmap_scan_order
# Required import: import celery [as alias]
# Or: from celery import chain [as alias]
def zmap_scan_order(self, order_uuid=None):
"""
Perform Zmap scans for all necessary ports for the given order.
:param order_uuid: The UUID of the order to scan.
:return: None
"""
port_tuples = get_ports_to_scan_for_scan_config(
config_uuid=self.scan_config.uuid,
db_session=self.db_session,
)
logger.info(
"Now scanning order %s for %s total ports."
% (order_uuid, len(port_tuples))
)
task_signatures = []
scan_signatures = []
network_scan = create_network_scan_for_organization(
db_session=self.db_session,
org_uuid=self.org_uuid,
)
self.commit_session()
for port, protocol in port_tuples:
scan_signatures.append(zmap_scan_order_for_port.si(
port=port,
protocol=protocol,
order_uuid=order_uuid,
network_scan_uuid=network_scan.uuid,
))
task_signatures.append(group(scan_signatures))
task_signatures.append(update_zmap_scan_completed.si(
scan_uuid=network_scan.uuid,
org_uuid=self.org_uuid,
order_uuid=order_uuid,
))
logger.info("Kicking off Zmap subtasks now.")
canvas_sig = chain(task_signatures)
canvas_sig.apply_async()
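One final canvas subtlety: task_signatures is a chain whose first element is a group and whose second is a regular signature. When Celery chains a group into a following task, it upgrades the pair to a chord, so update_zmap_scan_completed fires only after every zmap_scan_order_for_port subtask in the group has finished. Schematically:

# chain(group(scan_port_1, scan_port_2, ...), update_zmap_scan_completed)
# is executed by Celery as:
# chord(group(scan_port_1, scan_port_2, ...))(update_zmap_scan_completed)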