This article collects and summarizes typical code examples of the threadpool.ThreadPool.putRequest method in Python. If you are wondering what exactly Python's ThreadPool.putRequest does, how to use it, or where to find usage examples, the curated code samples below may help. You can also explore further usage of the containing class, threadpool.ThreadPool.
The following shows 15 code examples of the ThreadPool.putRequest method, sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code examples.
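Before diving into the examples, here is a minimal, self-contained sketch of the typical putRequest workflow with the threadpool library: build requests with makeRequests, enqueue them with putRequest, then block on wait. The worker function square, the callback on_result, and the input list are placeholders chosen for illustration and are not part of any example below.

from threadpool import ThreadPool, makeRequests

def square(n):
    # worker function executed by the pool's threads
    return n * n

def on_result(request, result):
    # called once per finished WorkRequest with its return value
    print('request #%s -> %s' % (request.requestID, result))

pool = ThreadPool(4)                                   # pool with 4 worker threads
for req in makeRequests(square, [1, 2, 3, 4], on_result):
    pool.putRequest(req)                               # enqueue each WorkRequest
pool.wait()                                            # block until all requests finish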
Example 1: startWork
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def startWork(self, work, argsList, resultCallback=None):
    try:
        requests = makeRequests(work, argsList, resultCallback, None)
        job = ThreadPool(self.threadNum)
        for req in requests:
            job.putRequest(req)
        job.wait()
    except:
        print sys.exc_info()
Example 2: bfTest
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def bfTest():
    pool = ThreadPool(100)
    for j in range(100):
        alltime = []
        for i in range(bingfa):
            work = WorkRequest(threads, args=(int(random.random() * portnum) % portnum,))
            pool.putRequest(work)
            sleep((1.0 / bingfa) * random.random())
            # threading.Thread(target=threads, args=(i % portnum,)).start()
        pool.wait()
        printdata(alltime)
Example 3: ConcurrentTestPool
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
class ConcurrentTestPool(Singleton):

    @synchronized_self
    def init(self):
        self.pool = ThreadPool(multiprocessing.cpu_count())

    @synchronized_self
    def put(self, callable_, args=None, kwds=None):
        self.pool.putRequest(WorkRequest(callable_, args=args, kwds=kwds))

    def join(self):
        self.pool.wait()
Example 4: refresh_tunnels
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def refresh_tunnels(args):
    tunnels = db.store.find(Tunnel)
    if tunnels:
        pool = ThreadPool(tunnels.count())
        for tunnel in tunnels:
            request = WorkRequest(tunnel.check_available)
            pool.putRequest(request)
        pool.wait()
    for tunnel in tunnels:
        host = db.store.get(Host, tunnel.hostid)
        record = AvailabilityRecord.register(host, tunnel, check=False)
        print record
Example 5: gits_download
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def gits_download(self, url, output="/tmp", threads=20):
    if not self.output:
        self.output = output
    results = self.index(url, output=output)
    if not results:
        return
    args = [((i[0], i[1]), {}) for i in self.giturls]
    # ... and build a WorkRequest object for each item in data
    requests = makeRequests(self.callback,
                            args,
                            self.print_result,
                            self.handle_exception)
    main = ThreadPool(threads)
    for req in requests:
        main.putRequest(req)
        print "Work request #%s added." % req.requestID
    i = 0
    while True:
        try:
            main.poll()
            print "Main thread working...",
            print "(active worker threads: %i)" % (
                threading.activeCount() - 1, )
            if i == 10:
                print "**** Adding 3 more worker threads..."
                main.createWorkers(3)
            if i == 20:
                print "**** Dismissing 2 worker threads..."
                main.dismissWorkers(2)
            i += 1
        except KeyboardInterrupt:
            print "**** Interrupted!"
            break
        except NoResultsPending:
            print "**** No pending results."
            break
    if main.dismissedWorkers:
        print "Joining all dismissed worker threads..."
        main.joinAllDismissedWorkers()
Example 6: TaskPool
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
class TaskPool(object):

    def __init__(self, limit, logger=None, **kwargs):
        self.limit = limit
        self.logger = logger or log.get_default_logger()
        self._pool = None

    def start(self):
        self._pool = ThreadPool(self.limit)

    def stop(self):
        self._pool.dismissWorkers(self.limit, do_join=True)

    def apply_async(self, target, args=None, kwargs=None, callbacks=None,
                    errbacks=None, accept_callback=None, **compat):
        args = args or []
        kwargs = kwargs or {}
        callbacks = callbacks or []
        errbacks = errbacks or []
        on_ready = partial(self.on_ready, callbacks, errbacks)

        self.logger.debug("ThreadPool: Apply %s (args:%s kwargs:%s)" % (
            target, args, kwargs))

        req = WorkRequest(do_work, (target, args, kwargs, on_ready,
                                    accept_callback))
        self._pool.putRequest(req)
        # threadpool also has callback support,
        # but for some reason the callback is not triggered
        # before you've collected the results.
        # Clear the results (if any), so the queue doesn't grow too large.
        self._pool._results_queue.queue.clear()
        return req

    def on_ready(self, callbacks, errbacks, ret_value):
        """What to do when a worker task is ready and its return value has
        been collected."""
        if isinstance(ret_value, ExceptionInfo):
            if isinstance(ret_value.exception, (
                    SystemExit, KeyboardInterrupt)):  # pragma: no cover
                raise ret_value.exception
            [errback(ret_value) for errback in errbacks]
        else:
            [callback(ret_value) for callback in callbacks]
Example 7: start_work
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def start_work(
    self,
    work,
    args_list,
    result_callback=None,
):
    outputs = dict()
    try:
        requests = makeRequests(work, args_list, result_callback,
                                None)
        job = ThreadPool(self.thread_num)
        for req in requests:
            req.outputs = outputs
            job.putRequest(req)
        job.wait()
    except:
        traceback.print_exc()
    return outputs
Example 8: TaskPool
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
class TaskPool(BasePool):

    def on_start(self):
        self._pool = ThreadPool(self.limit)

    def on_stop(self):
        self._pool.dismissWorkers(self.limit, do_join=True)

    def on_apply(self, target, args=None, kwargs=None, callback=None,
                 accept_callback=None, **_):
        req = WorkRequest(apply_target, (target, args, kwargs, callback,
                                         accept_callback))
        self._pool.putRequest(req)
        # threadpool also has callback support,
        # but for some reason the callback is not triggered
        # before you've collected the results.
        # Clear the results (if any), so the queue doesn't grow too large.
        self._pool._results_queue.queue.clear()
        return req
Example 9: run
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def run(self):
    page = self.getPage(baseURL)
    linksTitleDict = self.getTitleList(page)
    # for i, link in enumerate(articleLinks):
    #     print('parsing links in title: ' + articleTitle[i])
    #     links = self.getHomePageList(link)
    pool = ThreadPool(10)
    requests = makeRequests(self.getHomePageList, linksTitleDict.keys())
    # [pool.putRequest(req) for req in requests]
    for req in requests:
        pool.putRequest(req)
        print(req)
    pool.wait()
    for link in self.links:
        print(link)
Example 10: same_ms
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def same_ms(product_id):
    data = {'product_id': product_id, 'address_id': '72858'}
    url = 'http://payment.ohsame.com/order_create'
    time_s = time.time()
    pool = ThreadPool(20)
    reqs = makeRequests(same_ms_req, [((url, data), {}) for i in range(200)], same_ms_callback)
    [pool.putRequest(req) for req in reqs]
    pool.wait()
    time_e = time.time()
    print('Flash-sale product: %s\n' % str(product_id))
    print('Flash-sale result: %s\n' % rs_ms)
    print('Flash-sale time taken: %s\n' % (time_e - time_s))
Example 11: run_prod
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def run_prod():
    cycle_count = 1
    main = ThreadPool(num_workers=PARSER_THREAD_COUNT)
    while True:
        ADMIN_LOGGER.info("Starting cycle : " + str(cycle_count))
        reload(P_ROOT)
        process_list = [[e, __import__(P_ROOT.__name__ + '.' + e + '.main', fromlist=e)] for e in P_ROOT.__all__]
        process_dict = dict(process_list)
        ADMIN_LOGGER.info("Executing process list : " + str(process_dict.items()))
        for proc_name in process_dict.keys():
            proc = getattr(process_dict.get(proc_name), 'Parser', 'None')
            main.putRequest(WorkRequest(proc_runner, args=(1, proc), callback=None))
            ADMIN_LOGGER.info("Started thread : " + proc_name)
        try:
            main.poll()
        except NoResultsPending:
            pass
        except:
            ADMIN_LOGGER.error(traceback.format_exc())
        main.wait()
        ADMIN_LOGGER.info("Sleeping for default LISTING_SLEEP_TIME : " + str(GLOBAL_SLEEP_TIME))
        time.sleep(GLOBAL_SLEEP_TIME)
        cycle_count = 1 if cycle_count > 9999 else cycle_count + 1
Example 12: ThreadedHandler
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
class ThreadedHandler(BaseHandler, threading.Thread):
    """Daemon thread that spawns a thread pool for sending Airbrake notices."""

    def __init__(self, settings):
        BaseHandler.__init__(self, settings)
        thread_name = 'Airbrake{0}-{1}'.format(self.__class__.__name__,
                                               os.getpid())
        threading.Thread.__init__(self, name=thread_name)
        self.n_threads = settings['threaded.threads']
        self.poll_interval = settings['threaded.poll_interval']
        self.daemon = True  # daemon thread -- important!
        self.pool = ThreadPool(self.n_threads)
        self.start()

    def report(self, payload):
        request = WorkRequest(
            self._submit,
            args=(payload,),
            exc_callback=_exception_handler
        )
        self.pool.putRequest(request)

    def run(self):
        """Poll the pool for results and process them."""
        while True:
            try:
                time.sleep(self.poll_interval)
                self.pool.poll()
            except NoResultsPending:
                pass
Example 13: start_thread
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def start_thread(self):
    args_list = []
    ips = self.parse_ip()
    for ip in ips:
        args = self.args.copy()
        args['ip'] = ip
        args_list.append(args)
    self.cui.w('Proxy Scanner started')
    self.cui.i('Nums: %s' % len(args_list))
    self.cui.i('Port: %s' % self.args['port'])
    self.cui.i('Thread: %s' % self.args['thread'])
    pool = ThreadPool(self.args['thread'])
    reqs = makeRequests(self.run, args_list)
    [pool.putRequest(req) for req in reqs]
    pool.wait()
Example 14: run_example
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
def run_example():
    num_workers = 3
    pool = ThreadPool(num_workers)

    # This variable will tell us whether all threads worked or not. Stored in
    # an object (list) because otherwise the inner definition cannot modify it.
    success = [True]

    # The exception handler is not required, but if it's not used the error
    # will be silent.
    def exc_handler(work_request, exc_info):
        mldb.log(traceback.format_tb(exc_info[2]))
        exception_type = exc_info[0]
        exception_message = exc_info[1]
        mldb.log(str(exception_type) + ': ' + str(exception_message))
        success[0] = False

        # If there is an error, stop all threads as soon as possible
        pool.dismissWorkers(num_workers, do_join=True)

    # makeRequests takes, as a second argument, a list of tuples where the
    # first element is *args and the second one is **kwargs, and where each
    # tuple represents a job to run.
    #
    # Here we schedule two jobs.
    requests = makeRequests(some_func, [(['patate'], {}), (['orange'], {})],
                            exc_callback=exc_handler)
    [pool.putRequest(req) for req in requests]

    # pool.wait will raise an exception if an error occurs in an early job and
    # more jobs still need to be run. That's ok: if there is an error we want
    # to stop anyway.
    pool.wait()

    # If the error occurred in one of the last jobs, pool.wait will have worked,
    # so we need to check the flag anyway.
    if not success[0]:
        mldb.log("An error occurred")
        return

    # It is important (MLDBFB-470) to properly dismiss the workers
    pool.dismissWorkers(num_workers, do_join=True)
    mldb.log("Out of main thread")
Example 15: list
# Required import: from threadpool import ThreadPool [as alias]
# Or: from threadpool.ThreadPool import putRequest [as alias]
    sections = list(range(first_pid, last_pid, step))
    # while pid > last_pid:
    #     data.append(([gid, pid], []))
    #     pid -= 1
    #     group_count += 1
    #     if group_count == 25:
    #         thread_requests = makeRequests(request_period_html, data)
    #         [tp.putRequest(req) for req in thread_requests]
    #         group_count = 0
    #         data.clear()
    logging.info("Generating period id section...")
    for pid in sections:
        data.append(([gid, pid, step], []))
    thread_requests = makeRequests(request_period_html, data)
    [tp.putRequest(req) for req in thread_requests]
    logging.info("Scraping details of periods...")
    tp.wait()
elif getperiodfrom == 'database':
    # Get periods from database
    logging.info("Obtaining periods from database...")
    session = DBScopedSession()
    list_periods = session.query(Period).filter(Period.pid < 302201602).all()
    session.expunge_all()
    # print(len(list_periods))
    # print(len(list_owners))
    # session = DBSession()
    # session = DBScopedSession()