This article collects typical usage examples of the Pool.join method from the Python class w3af.core.controllers.threads.threadpool.Pool. If you are wondering what Pool.join does, how to call it, or what real-world uses look like, the curated samples below should help. You can also explore the containing class, w3af.core.controllers.threads.threadpool.Pool, for more context.
The following shows 7 code examples of Pool.join, sorted by popularity.
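All of the examples follow the same contract as the standard library pools, which w3af's Pool extends: join() may only be called after close() (drain the queue, then wait) or terminate() (stop immediately, then wait). A minimal sketch of both shutdown idioms, using the stdlib multiprocessing.dummy.Pool as a stand-in for w3af's Pool:

import time
from multiprocessing.dummy import Pool  # stdlib thread pool used as a stand-in

pool = Pool(2)
for _ in range(4):
    pool.apply_async(time.sleep, (0.1,))

# Graceful shutdown: stop accepting new work, let queued tasks run to
# completion, then block until every worker has exited.
pool.close()
pool.join()

# The abrupt variant used by several examples below:
#     pool.terminate()   # stop workers without draining the queue
#     pool.join()        # then wait for them to exit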
Example 1: test_increase_number_of_workers
# Required import: from w3af.core.controllers.threads.threadpool import Pool
def test_increase_number_of_workers(self):
    worker_pool = Pool(processes=2,
                       worker_names='WorkerThread',
                       maxtasksperchild=3)
    self.assertEqual(worker_pool.get_worker_count(), 2)

    def noop():
        return 1 + 2

    for _ in xrange(12):
        result = worker_pool.apply_async(func=noop)
        self.assertEqual(result.get(), 3)

    self.assertEqual(worker_pool.get_worker_count(), 2)

    worker_pool.set_worker_count(4)

    # It takes some time for the new workers to spawn, so the pool
    # still reports the old size right after set_worker_count()
    self.assertEqual(worker_pool.get_worker_count(), 2)

    for _ in xrange(12):
        result = worker_pool.apply_async(func=noop)
        self.assertEqual(result.get(), 3)

    self.assertEqual(worker_pool.get_worker_count(), 4)

    worker_pool.terminate()
    worker_pool.join()
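Note that set_worker_count(4) is applied lazily: the assertion right after it still sees two workers, and the pool only reaches four after the next batch of tasks recycles the existing workers (maxtasksperchild=3 makes that recycling happen quickly). If a caller needs to block until the resize has actually taken effect, a small polling helper is enough; get_worker_count() is the w3af Pool method used above, while the helper itself is just an illustration:

import time

def wait_for_worker_count(pool, expected, timeout=10.0):
    # Poll until the pool reports the expected number of live workers,
    # or give up after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if pool.get_worker_count() == expected:
            return True
        time.sleep(0.1)
    return False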
Example 2: test_multiple_append_uniq_group
def test_multiple_append_uniq_group(self):
    def multi_append():
        for i in xrange(InfoSet.MAX_INFO_INSTANCES * 2):
            vuln = MockVuln()
            kb.append_uniq_group('a', 'b', vuln, group_klass=MockInfoSetTrue)

        info_set_list = kb.get('a', 'b')
        self.assertEqual(len(info_set_list), 1)

        info_set = info_set_list[0]
        self.assertEqual(len(info_set.infos), InfoSet.MAX_INFO_INSTANCES)
        return True

    pool = Pool(2)
    r1 = pool.apply_async(multi_append)
    r2 = pool.apply_async(multi_append)
    r3 = pool.apply_async(multi_append)

    self.assertTrue(r1.get())
    self.assertTrue(r2.get())
    self.assertTrue(r3.get())

    pool.terminate()
    pool.join()
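The point of this test is the invariant, not the plumbing: three workers each append InfoSet.MAX_INFO_INSTANCES * 2 vulnerabilities concurrently, yet exactly one InfoSet survives and it is capped at MAX_INFO_INSTANCES items, so append_uniq_group must group and cap atomically. A minimal sketch of that capped-group idea (the cap value and class name are assumptions for illustration; the real kb implementation is more involved):

import threading

MAX_INFO_INSTANCES = 30  # assumed cap, standing in for InfoSet.MAX_INFO_INSTANCES

class CappedGroup(object):
    # All matching infos land in one group; the group stops growing
    # once it reaches the cap.
    def __init__(self):
        self._lock = threading.Lock()
        self.infos = []

    def add(self, info):
        # The lock makes the check-then-append atomic, which is exactly
        # what the three concurrent workers in the test rely on.
        with self._lock:
            if len(self.infos) < MAX_INFO_INSTANCES:
                self.infos.append(info)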
Example 3: test_pickleable_shells
def test_pickleable_shells(self):
    pool = Pool(1)
    xurllib = ExtendedUrllib()

    original_shell = Shell(MockVuln(), xurllib, pool)

    kb.append('a', 'b', original_shell)
    unpickled_shell = kb.get('a', 'b')[0]

    self.assertEqual(original_shell, unpickled_shell)
    self.assertEqual(unpickled_shell.worker_pool, None)
    self.assertEqual(unpickled_shell._uri_opener, None)

    pool.terminate()
    pool.join()
    xurllib.end()
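The interesting assertions are the last three: the knowledge base stores a pickled copy of the shell, and after the round-trip it still compares equal to the original while its worker_pool and _uri_opener come back as None, since live pools and URL openers cannot be pickled. A hedged sketch of the pattern those assertions imply (the real Shell class may implement it differently):

class PicklableShellSketch(object):
    def __init__(self, vuln, uri_opener, worker_pool):
        self.vuln = vuln
        self._uri_opener = uri_opener
        self.worker_pool = worker_pool

    def __getstate__(self):
        # Drop process-local, unpicklable handles before serialization
        state = self.__dict__.copy()
        state['_uri_opener'] = None   # sockets/openers can't be pickled
        state['worker_pool'] = None   # neither can live thread pools
        return state

    def __eq__(self, other):
        # Compare the underlying vulnerability data, not the live handles;
        # this is why assertEqual passes after the pickle round-trip
        return self.vuln == other.vuln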
Example 4: test_max_queued_tasks
def test_max_queued_tasks(self):
    worker_pool = Pool(processes=1, max_queued_tasks=2)

    # These tasks should be queued very fast
    worker_pool.apply_async(func=time.sleep, args=(2,))
    worker_pool.apply_async(func=time.sleep, args=(2,))
    worker_pool.apply_async(func=time.sleep, args=(2,))
    worker_pool.apply_async(func=time.sleep, args=(2,))

    # Now the pool is full and we need to wait in the main
    # thread to get the task queued
    start = time.time()

    worker_pool.apply_async(func=time.sleep, args=(2,))

    spent = time.time() - start

    worker_pool.close()
    worker_pool.join()

    self.assertLess(spent, 2.1)
    self.assertGreater(spent, 1.9)
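Unlike the stdlib pools, whose internal task queue is unbounded, the max_queued_tasks argument gives w3af's Pool backpressure: with one worker and the queue bounded, the fifth apply_async blocks in the calling thread for roughly one task duration, which is what the timing assertions verify. One way to build similar behavior on top of a stdlib pool, sketched with a semaphore (an assumption for illustration, not w3af's actual implementation):

import threading
from multiprocessing.dummy import Pool  # stdlib thread pool for illustration

class BoundedPool(object):
    # Sketch: block apply_async once `max_queued_tasks` results are
    # outstanding, releasing a slot as each task completes.
    def __init__(self, processes, max_queued_tasks):
        self._pool = Pool(processes)
        self._slots = threading.BoundedSemaphore(max_queued_tasks)

    def apply_async(self, func, args=()):
        self._slots.acquire()  # blocks the caller when the queue is full

        def _release(_result=None):
            self._slots.release()

        # NOTE: on Python 2.7 the callback is skipped if func raises,
        # leaking a slot; see the warning in Example 5's _task_done docstring
        return self._pool.apply_async(func, args, callback=_release)

    def close(self):
        self._pool.close()

    def join(self):
        self._pool.join()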
Example 5: BaseConsumer
class BaseConsumer(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins and identifies vulnerabilities by performing various
    requests.
    """

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True):
        """
        :param consumer_plugins: Instances of consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = QueueSpeed()
        self._out_queue = Queue.Queue()

        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        self._tasks_in_progress = {}

        self._threadpool = None

        if create_pool:
            self._threadpool = Pool(10, worker_names='%sWorker' % thread_name)

    def run(self):
        """
        Consume the queue items, sending them to the plugins which are then
        going to find vulnerabilities, new URLs, etc.
        """
        while True:
            work_unit = self.in_queue.get()

            if work_unit == POISON_PILL:
                # Close the pool and wait for everyone to finish
                self._threadpool.close()
                self._threadpool.join()
                del self._threadpool
                self._teardown()

                # Finish this consumer and everyone consuming the output
                self._out_queue.put(POISON_PILL)
                self.in_queue.task_done()
                break
            else:
                # pylint: disable=E1120
                self._consume_wrapper(work_unit)
                self.in_queue.task_done()

    def _teardown(self):
        raise NotImplementedError

    def _consume(self, work_unit):
        raise NotImplementedError

    @task_decorator
    def _consume_wrapper(self, function_id, work_unit):
        """
        Just makes sure that all _consume methods are decorated as tasks.
        """
        return self._consume(work_unit)

    def _task_done(self, function_id):
        """
        The task_in_progress_counter is needed because we want to know if the
        consumer is processing something and let it finish. It is mainly used
        in has_pending_work().

        For example:
            * You can have pending work if there are items in the input_queue

            * You can have pending work if there are still items to be read
              from the output_queue by one of the consumers that reads our
              output.

            * You can have pending work when there are no items in input_queue
              and no items in output_queue but the threadpool inside the
              consumer is processing something. This situation is handled by
              the self._tasks_in_progress attribute and the _add_task and
              _task_done methods.

        So, for each _add_task() there has to be a _task_done() even if the
        task ends in an error or exception.

        Recommendation: do NOT set the apply_async callback to _task_done:
        the Python 2.7 pool implementation won't call it if the function
        raised an exception, and you'll end up with in-progress tasks that
        actually finished with an exception.
        """
        try:
            # ... (remainder of this method omitted by the source) ...
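The recommendation at the end of that docstring deserves a concrete shape: since the Python 2.7 pool skips the apply_async callback when the function raises, the _add_task() / _task_done() pairing has to live inside the task itself, guarded by try/finally. An illustrative method sketch (not w3af's actual code; _add_task and _task_done are the methods named in the docstring, and the key choice is arbitrary):

    def _run_task_sketch(self, func, work_unit):
        # Illustrative helper: pair every _add_task() with a _task_done(),
        # even when the task raises, by doing the bookkeeping inside the
        # task instead of in an apply_async callback.
        function_id = id(work_unit)  # any stable key works here
        self._add_task(function_id)

        def wrapper():
            try:
                return func(work_unit)
            finally:
                # Runs on success AND on exception, unlike a Python 2.7
                # apply_async callback
                self._task_done(function_id)

        return self._threadpool.apply_async(wrapper)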
Example 6: test_terminate_join
def test_terminate_join(self):
    worker_pool = Pool(1, worker_names='WorkerThread')
    worker_pool.terminate()
    worker_pool.join()
Example 7: BaseConsumer
# ... (beginning of this class omitted by the source) ...
        self._threadpool = None

        if create_pool:
            self._threadpool = Pool(thread_pool_size or self.THREAD_POOL_SIZE,
                                    worker_names='%sWorker' % thread_name,
                                    max_queued_tasks=max_pool_queued_tasks)

    def get_pool(self):
        return self._threadpool

    def run(self):
        """
        Consume the queue items, sending them to the plugins which are then
        going to find vulnerabilities, new URLs, etc.
        """
        while True:
            try:
                work_unit = self.in_queue.get()
            except KeyboardInterrupt:
                # https://github.com/andresriancho/w3af/issues/9587
                #
                # If we don't do this, the thread will die and will never
                # process the POISON_PILL, which will end up in an endless
                # wait for .join()
                continue

            if work_unit == POISON_PILL:
                try:
                    # Close the pool and wait for everyone to finish
                    if self._threadpool is not None:
                        self._threadpool.close()
                        self._threadpool.join()
                        self._threadpool = None

                    self._teardown()
                finally:
                    # Finish this consumer and everyone consuming the output
                    self._out_queue.put(POISON_PILL)
                    self.in_queue.task_done()
                    break
            else:
                # pylint: disable=E1120
                try:
                    self._consume_wrapper(work_unit)
                finally:
                    self.in_queue.task_done()

    def _teardown(self):
        raise NotImplementedError

    def _consume(self, work_unit):
        raise NotImplementedError

    @task_decorator
    def _consume_wrapper(self, function_id, work_unit):
        """
        Just makes sure that all _consume methods are decorated as tasks.
        """
        return self._consume(work_unit)

    def _task_done(self, function_id):
        """
        The task_in_progress_counter is needed because we want to know if the
# ... (remainder of the code omitted by the source) ...
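Taken together, Examples 5 and 7 show the poison-pill protocol that makes BaseConsumer join()-able: the sentinel both breaks the consume loop and is forwarded downstream so the next stage shuts down too. A self-contained sketch of that protocol (POISON_PILL here is a local sentinel for illustration; w3af defines its own):

import threading
import Queue  # Python 2; on Python 3 this module is named `queue`

POISON_PILL = object()  # local sentinel, standing in for w3af's constant

def consumer(in_queue, out_queue):
    # Minimal version of BaseConsumer.run(): consume until the sentinel
    # arrives, then propagate it so downstream consumers also stop.
    while True:
        work_unit = in_queue.get()
        if work_unit is POISON_PILL:
            out_queue.put(POISON_PILL)
            break
        # ... process work_unit here ...

in_q, out_q = Queue.Queue(), Queue.Queue()
worker = threading.Thread(target=consumer, args=(in_q, out_q))
worker.start()

in_q.put('some-work')
in_q.put(POISON_PILL)
worker.join()                        # returns because the pill broke the loop
assert out_q.get() is POISON_PILL    # downstream sees the pill too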