This article collects typical usage examples of the Python method multiprocessing.pool.ThreadPool.apply. If you have been wondering what ThreadPool.apply does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read further about its enclosing class, multiprocessing.pool.ThreadPool.
Seven code examples of ThreadPool.apply are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
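Before the examples, here is a minimal sketch of the method itself (our own illustration, not taken from any example below). ThreadPool.apply(func, args, kwds) runs func on a worker thread and blocks until it returns, handing back func's return value; apply_async is its non-blocking counterpart, and several examples below use the two side by side.
from multiprocessing.pool import ThreadPool

def square(x):
    return x * x

pool = ThreadPool(4)
print(pool.apply(square, (3,)))           # blocks until the worker returns; prints 9
promise = pool.apply_async(square, (4,))  # non-blocking counterpart; returns an AsyncResult
print(promise.get())                      # waits for the result and prints 16
pool.close()
pool.join()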
Example 1: MainFrame
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
class MainFrame(wx.Frame):
    """Main frame."""

    def __init__(self):
        self.pool = ThreadPool()
        super(MainFrame, self).__init__(None, title='Operational Status')
        p = wx.Panel(self)
        default_style = wx.TE_RICH | wx.TE_READONLY | wx.TE_MULTILINE
        sizer_sms = wx.BoxSizer(wx.VERTICAL)
        sizer_sms.Add(wx.StaticText(p, label='&SMS Status'), 0, wx.GROW)
        self.status_sms = wx.TextCtrl(p, style=default_style)
        sizer_sms.Add(self.status_sms, 1, wx.GROW)
        sizer_3r = wx.BoxSizer(wx.VERTICAL)
        sizer_3r.Add(wx.StaticText(p, label='&3R Status'), 0, wx.GROW)
        self.status_3r = wx.TextCtrl(p, style=default_style)
        sizer_3r.Add(self.status_3r, 1, wx.GROW)
        sizer_email = wx.BoxSizer(wx.VERTICAL)
        sizer_email.Add(wx.StaticText(p, label='&Email Status'), 0, wx.GROW)
        self.status_email = wx.TextCtrl(p, style=default_style)
        sizer_email.Add(self.status_email, 1, wx.GROW)
        s = wx.BoxSizer(wx.HORIZONTAL)
        s.AddMany(
            [
                (sizer_sms, 1, wx.GROW),
                (sizer_3r, 1, wx.GROW),
                (sizer_email, 1, wx.GROW)
            ]
        )
        p.SetSizer(s)  # attach the layout to the panel (missing from the original listing)
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_SHOW, self.on_show)
        self.Bind(wx.EVT_TIMER, self.on_update, self.timer)
        self.Bind(wx.EVT_CLOSE, self.on_close)

    def on_show(self, event):
        """Window has been shown."""
        self.timer.Start(10000)  # poll every 10 seconds
        self.on_update(event)
        event.Skip()

    def _get_sms(self):
        # get_sms() is a module-level helper defined elsewhere in the source.
        wx.CallAfter(self.status_sms.SetValue, get_sms())

    def _get_email(self):
        # get_email() is a module-level helper defined elsewhere in the source.
        wx.CallAfter(self.status_email.SetValue, get_email())

    def on_update(self, event):
        self.pool.apply(self._get_sms)
        self.pool.apply(self._get_email)

    def on_close(self, event):
        """Window is closing."""
        self.timer.Stop()
        event.Skip()
Example 2: consume_keys_asynchronous_threads
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
def consume_keys_asynchronous_threads(self):
    """
    Work through the keys to look up asynchronously, using multiple threads.
    """
    print("\nLooking up %d keys from %s\n" % (self.input_queue.qsize(), self.source_name))
    # Use up to four worker threads per CPU, but never more threads than keys.
    jobs = min(multiprocessing.cpu_count() * 4, self.input_queue.qsize())
    pool = ThreadPool(jobs)
    for x in range(jobs):
        pool.apply(self.data_worker, [], self.worker_args)
    pool.close()
    pool.join()
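Note that ThreadPool.apply blocks until the worker returns, so the loop above actually runs the workers one after another rather than in parallel. A hedged variant (assuming the same data_worker and worker_args) that dispatches all workers concurrently would swap in apply_async:
for x in range(jobs):
    pool.apply_async(self.data_worker, [], self.worker_args)
pool.close()
pool.join()  # wait for every worker to finish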
Example 3: TCPServer
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
class TCPServer(object):
    """
    Base server class. Listens for requests
    and queues them to be handled by a worker thread.
    """

    def __init__(self, host=None, port=None, **kwargs):
        self.host = host if host else SERVER_DEFAULT
        self.port = port if port else PORT_DEFAULT
        self.commands = kwargs.get("commands", {})
        threads = kwargs.get("threads", NUM_THREADS)
        self.request_queue = ThreadPool(threads)
        self.socket = None
        self.make_conn()
        self.start_signal_handler()

    def make_conn(self):
        """
        Open a socket and bind it to our address and port.
        """
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR must be set before bind() for it to take effect.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind((self.host, self.port))
        self.socket.listen(5)

    def signal_handler(self, signal, frame):
        self.request_queue.close()  # a pool must be closed before it can be joined
        self.request_queue.join()
        self.socket.close()

    def start_signal_handler(self):
        signal.signal(signal.SIGINT, self.signal_handler)

    def listen(self):
        print("TCPServer is listening at %s:%d!" % (self.host, self.port))
        hf = HandlerFactory(self.commands)
        while True:
            logging.debug("TCPServer accepting requests.")
            client_sock, client_addr = self.socket.accept()
            client_host, client_port = client_addr
            logging.debug("TCPServer handling request from %s:%s." % (client_host, client_port))
            handler = RequestHandler(hf,
                                     client_host,
                                     client_port,
                                     client_sock)
            self.request_queue.apply(handler.handle, ())
        self.socket.close()
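A hedged usage sketch for the class above (the host and port are ours; the exact shape of the commands mapping is defined by HandlerFactory, so an empty dict is used here):
server = TCPServer(host='127.0.0.1', port=9000, commands={})
server.listen()  # blocks, accepting and dispatching connections until SIGINT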
Example 4: IndexSearcher
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
class IndexSearcher:
    def __init__(self, searchThreadNum, indexManager):
        self._indexManager = indexManager
        self._logger = Logger.Get('IndexSearcher')
        self._searchThreads = Pool(searchThreadNum)  # Pool is ThreadPool imported under an alias

    def Search(self, termIdList):
        indexSearchRequest = IndexSearchRequest(termIdList)
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('SearchPrepare, termIdList = %s'
                               % str(termIdList))
        self._Search(indexSearchRequest)
        return indexSearchRequest.result

    def _Search(self, indexSearchRequest):
        termIdList = indexSearchRequest.termIdList
        for termId in termIdList:
            (ret, retCode) = self._indexManager.Fetch(termId)
            if retCode:
                # Posting list already in memory; add it directly.
                if self._logger.isEnabledFor(logging.DEBUG):
                    self._logger.debug('fetch term %d success' % termId)
                indexSearchRequest.indexHandler.Add(ret)
            else:
                if ret is None:
                    # The term does not exist at all, so the intersection is empty.
                    if self._logger.isEnabledFor(logging.DEBUG):
                        self._logger.debug('term %d not exist' % termId)
                    indexSearchRequest.result = None
                    return
                else:
                    # The posting list is being read from disk; wait for it later.
                    if self._logger.isEnabledFor(logging.DEBUG):
                        self._logger.debug('fetch term %d from diskio' % termId)
                    indexSearchRequest.waitingIORequests.append(ret)
        self._searchThreads.apply(self._Searching, (indexSearchRequest,))

    def _Searching(self, indexSearchRequest):
        waitingRequests = indexSearchRequest.waitingIORequests
        indexHandler = indexSearchRequest.indexHandler
        for readRequest in waitingRequests:
            readRequest.Wait()
            indexHandler.Add(readRequest.result)
        if self._logger.isEnabledFor(logging.DEBUG):
            self._logger.debug('all posting lists are ready, request id: %s'
                               % indexSearchRequest.id)
        indexSearchRequest.result = indexHandler.Intersect()
Example 5: _run_processing_jobs
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
def _run_processing_jobs(parameter_dict, reader, n_processes, process_batch_size):
    """Creates document batches and dispatches them to processing nodes.

    :param parameter_dict: dataset import's parameters.
    :param reader: dataset importer's document reader.
    :param n_processes: size of the pool.
    :param process_batch_size: the number of documents a node processes at a time.
    :type parameter_dict: dict
    :type n_processes: int
    :type process_batch_size: int
    """
    # Close inherited Django database connections before handing work to the pool.
    from django import db
    db.connections.close_all()
    if parameter_dict.get('remove_existing_dataset', False):
        _remove_existing_dataset(parameter_dict)
    import_job_lock = Lock()
    process_pool = Pool(processes=n_processes, initializer=_init_pool, initargs=(import_job_lock,))
    batch = []
    for document in reader.read_documents(**parameter_dict):
        batch.append(document)
        # Dispatch a batch as soon as it reaches the batch size, then start a new one.
        if len(batch) == process_batch_size:
            process_pool.apply(_processing_job, args=(batch, parameter_dict))
            batch = []
    # Dispatch the final batch, which may not have reached the batch size.
    if batch:
        process_pool.apply(_processing_job, args=(batch, parameter_dict))
    process_pool.close()
    process_pool.join()
    _complete_import_job(parameter_dict)
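The batch-then-flush pattern above is common enough to factor out. A small self-contained sketch of the same idiom (the batched helper is our name, not part of the importer):
def batched(iterable, size):
    """Yield lists of at most `size` consecutive items from `iterable`."""
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:  # the final, partially filled batch
        yield batch

With it, the dispatch loop above collapses to:
for batch in batched(reader.read_documents(**parameter_dict), process_batch_size):
    process_pool.apply(_processing_job, args=(batch, parameter_dict))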
Example 6: HTTPFetchPool
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
class HTTPFetchPool:
    _num_thread = 5
    _retry_thread = 50
    _retry_limit = 10
    _thread_pool = None
    _retry_pool = None
    _timeout = 3
    _retry_timeout = 10
    _retry_sleep = 3

    def __init__(self, num_thread=5, retry_thread=50, retry_limit=10):
        self._num_thread = num_thread
        self._retry_thread = retry_thread
        self._retry_limit = retry_limit

    def start(self):
        # Separate pools: one for first attempts, a larger one for retries.
        self._thread_pool = ThreadPool(self._num_thread)
        self._retry_pool = ThreadPool(self._retry_thread)

    def addAsyncJob(self, url, headers=None, data=None, callback=None, *args,
                    **kwargs):
        kwargs['_fetcher'] = self
        kwargs['_url'] = url
        kwargs['_header'] = headers
        kwargs['_data'] = data
        kwargs['_callback'] = callback
        kwargs['_async'] = True
        return self._thread_pool.apply_async(self.download, args, kwargs,
                                             self.middleman)

    def addSyncJob(self, url, headers=None, data=None, callback=None, *args,
                   **kwargs):
        kwargs['_fetcher'] = self
        kwargs['_url'] = url
        kwargs['_header'] = headers
        kwargs['_data'] = data
        kwargs['_callback'] = callback
        kwargs['_async'] = False
        # try:
        result = self._thread_pool.apply(self.download, args, kwargs)
        # except Exception as err:
        #     result.status = -1
        #     result.exception = err
        #     print(err)
        #     raise
        return self.middleman(result)

    def addRetryJob(self, *args, **kwargs):
        if kwargs['_async']:
            return self._retry_pool.apply_async(self.retry, args, kwargs, self.middleman)
        else:
            return self._retry_pool.apply(self.retry, args, kwargs)

    @classmethod
    def middleman(cls, result):
        print("Middleman")
        callback = result.kwargs['_callback']
        if result.status == -1 and result.retry_asyncresult is not None:
            return result
        if callback:
            result = callback(result)
        return result

    def stop(self):
        self._thread_pool.close()
        self._thread_pool.join()
        self._retry_pool.close()
        self._retry_pool.join()

    def retry(self, *args, **kwargs):
        url = kwargs['_url']
        headers = kwargs['_header']
        data = kwargs['_data']
        print("Start retry " + url)
        result = HTTPFetchResult()
        retrycount = 0
        while retrycount < self._retry_limit:
            retrycount += 1
            try:
                result = doDownload(url, headers, data, self._retry_timeout)
            except (HTTPError, URLError) as e:
                print("Error %d/%d" % (retrycount, self._retry_limit))
                print(e.reason)
                result.status = -1
                result.exception = e
                result.retry_asyncresult = None
            except Exception as err:
                # str(err) rather than err.reason: arbitrary exceptions have no .reason
                print("Fatal Error " + url + " " + str(err))
                result.status = -1
                result.exception = err
                result.retry_asyncresult = None
                result.args = args
                result.kwargs = kwargs
                raise
#......... part of the code is omitted here .........
Example 7: ThreadPool
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply [as alias]
#!/usr/bin/env python
"""
Parallel clusterized gridftp sync
"""
import os
import time
import subprocess
from functools import partial
from multiprocessing.pool import ThreadPool

source = '/mnt/lfs4/simprod/dagtemp2/20009'
dest = 'gsiftp://gridftp-scratch.icecube.wisc.edu/local/simprod/20009'

pool = ThreadPool(100)
for root, dirs, files in os.walk(source):
    for f in files:
        s = os.path.join(root, f)
        d = s.replace(source, dest)
        cmd = 'globus-url-copy -sync -v -cd -rst file://%s %s' % (s, d)
        pool.apply(partial(subprocess.call, cmd, shell=True))
pool.close()  # a pool must be closed before it can be joined
pool.join()
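Because pool.apply blocks until each copy finishes, the 100-thread pool above still transfers one file at a time; replacing apply with apply_async (and keeping the final close()/join() pair) would let up to 100 globus-url-copy transfers run concurrently.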