This article collects typical usage examples of the Python method concurrent.futures.ThreadPoolExecutor. If you are wondering what futures.ThreadPoolExecutor does, how to use it, or are looking for concrete examples, the curated code samples below may help. You can also explore the other members of the concurrent.futures module for further usage examples.
The following presents 15 code examples of futures.ThreadPoolExecutor, sorted by popularity by default.
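Before diving into the project examples, here is a minimal, self-contained sketch of the basic submit/result pattern. It is not taken from any of the projects below and only uses the standard-library API:

from concurrent.futures import ThreadPoolExecutor

def greet(name):
    # Trivial placeholder task used only for this sketch.
    return "hello, " + name

with ThreadPoolExecutor(max_workers=2) as executor:
    future = executor.submit(greet, "world")
    print(future.result())  # blocks until the task finishes, then prints "hello, world"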
Example 1: __init__
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def __init__(self, maxsize=0, worker_threads=1, unpack_threads=1, inspect_threads=1, idb_threads=1, bindiff_threads=1):
    """
    Create a Bass server.

    :param maxsize: Maximum size of the job queue. If the queue is full, jobs are rejected. 0 means unlimited.
    :param worker_threads: Number of worker threads to use.
    :param unpack_threads: Thread count for the unpack executor.
    :param inspect_threads: Thread count for the inspect executor.
    :param idb_threads: Thread count for the IDB executor.
    :param bindiff_threads: Thread count for the BinDiff executor.
    """
    # TODO: Access to jobs is not threadsafe
    self.job_counter = 1
    self.jobs = {}
    self.jobs_lock = Lock()
    self.input_queue = Queue(maxsize)
    self.unpack_executor = ThreadPoolExecutor(max_workers=unpack_threads)
    self.inspect_executor = ThreadPoolExecutor(max_workers=inspect_threads)
    self.idb_executor = ThreadPoolExecutor(max_workers=idb_threads)
    self.bindiff_executor = ThreadPoolExecutor(max_workers=bindiff_threads)
    self.inspectors = [MagicInspector(), SizeInspector(), FileTypeInspector()]
    self.terminate = False
    self.threads = [start_thread(self.process_job) for _ in range(worker_threads)]
    self.bindiff = BindiffClient(urls=[BINDIFF_SERVICE_URL])
    self.whitelist = FuncDB(FUNCDB_SERVICE_URL)
    self.ida = IdaClient(urls=[IDA_SERVICE_URL])
Example 2: main
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def main():
    t1 = timeit.default_timer()
    with ProcessPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
    print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))

    t2 = timeit.default_timer()
    with ThreadPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
    print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

    t3 = timeit.default_timer()
    for number in PRIMES:
        isPrime = is_prime(number)
        print("{} is prime: {}".format(number, isPrime))
    print("{} Seconds needed for single threaded execution".format(timeit.default_timer() - t3))
Example 3: main
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def main(unused_argv):
    servers = []
    server_creds = loas2.loas2_server_credentials()
    port = FLAGS.port
    if not FLAGS.run_on_borg:
        port = 20000 + FLAGS.server_id
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10), ports=(port,))
    servicer = ars_evaluation_service.ParameterEvaluationServicer(
        FLAGS.config_name, worker_id=FLAGS.server_id)
    ars_evaluation_service_pb2_grpc.add_EvaluationServicer_to_server(
        servicer, server)
    server.add_secure_port("[::]:{}".format(port), server_creds)
    servers.append(server)
    server.start()
    print("Start server {}".format(FLAGS.server_id))

    # prevent the main thread from exiting
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        for server in servers:
            server.stop(0)
Example 4: itn_handler
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def itn_handler(host, port):  # type: (str, int) -> Iterator[Queue]
    """
    Usage::

        with itn_handler(ITN_HOST, ITN_PORT) as itn_queue:
            # ...complete PayFast payment...
            itn_data = itn_queue.get(timeout=2)
    """
    server_address = (host, port)
    http_server = HTTPServer(server_address, PayFastITNHandler)
    http_server.itn_queue = Queue()  # type: ignore

    executor = ThreadPoolExecutor(max_workers=1)
    executor.submit(http_server.serve_forever)

    try:
        yield http_server.itn_queue  # type: ignore
    finally:
        http_server.shutdown()
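Because itn_handler yields and its docstring shows it being used in a with statement, the original module presumably decorates it with contextlib.contextmanager; the decorator is simply not included in the snippet. A tiny illustration of that pattern, with purely hypothetical names:

from contextlib import contextmanager

@contextmanager
def managed_resource():
    resource = {"ready": True}  # hypothetical setup
    try:
        yield resource           # value bound by "with managed_resource() as r:"
    finally:
        resource.clear()         # hypothetical teardown, always runs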
Example 5: _run_invoker_process
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def _run_invoker_process(self, inv_id):
    """
    Run process that implements token bucket scheduling approach
    """
    logger.debug('ExecutorID {} - Invoker process {} started'.format(self.executor_id, inv_id))

    with ThreadPoolExecutor(max_workers=250) as executor:
        while True:
            try:
                self.token_bucket_q.get()
                job, call_id = self.pending_calls_q.get()
            except KeyboardInterrupt:
                break
            if self.running_flag.value:
                executor.submit(self._invoke, job, call_id)
            else:
                break

    logger.debug('ExecutorID {} - Invoker process {} finished'.format(self.executor_id, inv_id))
Example 6: scan_server
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def scan_server(
    cls, server_info: "ServerConnectivityInfo", extra_arguments: Optional[_ScanCommandExtraArgumentsTypeVar] = None
) -> _ScanCommandResultTypeVar:
    """Utility method to run a scan command directly.

    This is useful for the test suite to run commands without using the Scanner class. It should NOT be used to
    actually run scans as this will be very slow (no multi-threading); use the Scanner class instead.
    """
    thread_pool = ThreadPoolExecutor(max_workers=5)
    all_jobs = cls.scan_jobs_for_scan_command(server_info, extra_arguments)

    all_futures = []
    for job in all_jobs:
        future = thread_pool.submit(job.function_to_call, *job.function_arguments)
        all_futures.append(future)

    result = cls.result_for_completed_scan_jobs(server_info, all_futures)
    return result
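Note that Example 6 never shuts its thread pool down explicitly. A hedged variant of the same submission loop (not from sslyze, just the standard-library context-manager pattern) that guarantees the worker threads are cleaned up:

# Same submission pattern as above, but with the executor as a context manager so the
# worker threads are always shut down; "all_jobs" is the same list as in Example 6.
with ThreadPoolExecutor(max_workers=5) as thread_pool:
    all_futures = [
        thread_pool.submit(job.function_to_call, *job.function_arguments)
        for job in all_jobs
    ]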
Example 7: get_all_meta
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def get_all_meta(show_id):
    import xml.etree.ElementTree as ET
    from concurrent import futures
    from kmediatorrent.utils import url_get, joining

    def _get_all_meta():
        r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG), headers=HEADERS, with_immunicity=False)
        dom = ET.fromstring(r)
        if not len(dom):
            return
        return update_image_urls(dom2dict(dom))

    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        meta = pool.submit(_get_all_meta)
        banners = pool.submit(get_banners, show_id)
        meta = meta.result()
        meta["series"][0]["episodes"] = meta["episode"]
        meta = meta["series"][0]
        meta["banners"] = banners.result() or []
        return meta
Example 8: __init__
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def __init__(self, host, port):
    self._main_executor = ThreadPoolExecutor(max_workers=1)
    self._time_scheduler = TimeScheduler(self._main_executor)
    self._udp_gateway = UDPGateway(
        host,
        port,
        self._on_receive_message,
        [
            ListParametersProxy(),
            UnicodeProxy(),
            FragmentProxy(),
            RendezvousRelayProxy(),
            ReliabilityProxy(self._time_scheduler),
        ],
    )
    self._rendezvous_protocol = AgentRendezvousProtocolHandler(
        self._udp_gateway,
    )
Example 9: _workers_pool
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def _workers_pool(self):
    if self._pool is not None:
        return self._pool

    # lazy init the workers pool
    got_initialized = False
    with type(self)._POOL_LOCK:
        if self._pool is None:
            self._pool = ThreadPoolExecutor(max_workers=self._pool_size,
                                            thread_name_prefix='AsyncArcticWorker')
            got_initialized = True

    # Call hooks outside the lock, to minimize time-under-lock
    if got_initialized:
        for hook in self._pool_update_hooks:
            hook(self._pool_size)

    return self._pool
Example 10: __init__
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def __init__(self, executor=None):
    """Create instance of ThreadPoolExecutorRunner class"""
    self._tick = 0.005  # Tick for sleep or partial timeout
    self._in_shutdown = False
    self._i_own_executor = False
    self._was_timeout_called = False
    self.executor = executor
    self.logger = logging.getLogger('moler.runner.thread-pool')
    self.logger.debug("created")
    atexit.register(self.shutdown)
    if executor is None:
        max_workers = 1000  # max 1000 threads in pool
        try:  # concurrent.futures v.3.2.0 introduced prefix we like :-)
            self.executor = ThreadPoolExecutor(max_workers=max_workers, thread_name_prefix='ThrdPoolRunner')
        except TypeError as exc:
            if ('unexpected' in str(exc)) and ('thread_name_prefix' in str(exc)):
                self.executor = ThreadPoolExecutor(max_workers=max_workers)
            else:
                raise
        self.logger.debug("created own executor {!r}".format(self.executor))
        self._i_own_executor = True
    else:
        self.logger.debug("reusing provided executor {!r}".format(self.executor))
Example 11: test_CancellableFuture_str_casting_shows_embedded_future
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def test_CancellableFuture_str_casting_shows_embedded_future():
    import threading
    from moler.runner import CancellableFuture
    from concurrent.futures import ThreadPoolExecutor

    def fun_with_future_result(delay):
        time.sleep(delay)
        return delay * 2

    executor = ThreadPoolExecutor()
    observer_lock = threading.Lock()
    stop_feeding = threading.Event()
    feed_done = threading.Event()
    connection_observer_future = executor.submit(fun_with_future_result, delay=0.1)
    c_future = CancellableFuture(connection_observer_future, observer_lock, stop_feeding, feed_done)

    connection_observer_future_as_str = str(connection_observer_future)
    c_future_as_str = str(c_future)
    assert c_future_as_str == "CancellableFuture({})".format(connection_observer_future_as_str)
    executor.shutdown()
Example 12: count
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def count(self, lines):
    # use the name server's prefix lookup to get all registered wordcounters
    with locate_ns() as ns:
        all_counters = ns.list(prefix="example.dc2.wordcount.")

    # chop the text into chunks that can be distributed across the workers
    # uses futures so that it runs the counts in parallel
    # counter is selected in a round-robin fashion from list of all available counters
    with futures.ThreadPoolExecutor() as pool:
        roundrobin_counters = cycle(all_counters.values())
        tasks = []
        for chunk in grouper(200, lines):
            tasks.append(pool.submit(self.count_chunk, next(roundrobin_counters), chunk))

        # gather the results
        print("Collecting %d results (counted in parallel)..." % len(tasks))
        totals = Counter()
        for task in futures.as_completed(tasks):
            try:
                totals.update(task.result())
            except Pyro5.errors.CommunicationError as x:
                raise Pyro5.errors.PyroError("Something went wrong in the server when collecting the responses: " + str(x))
        return totals
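Example 12 relies on a grouper helper (not shown in the snippet) that splits the input lines into chunks of 200, plus cycle from itertools and Counter from collections. A sketch of what such a grouper might look like, assuming the classic itertools recipe with a (n, iterable) argument order:

from itertools import zip_longest

def grouper(n, iterable):
    # Yield lists of up to n items from the iterable; hypothetical stand-in for the
    # helper used in Example 12, based on the classic itertools "grouper" recipe.
    args = [iter(iterable)] * n
    for chunk in zip_longest(*args):
        yield [item for item in chunk if item is not None]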
Example 13: __init__
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def __init__(self):
    self.root = tkinter.Tk()
    self.root.title("Mandelbrot (Pyro multi CPU core version)")
    canvas = tkinter.Canvas(self.root, width=res_x, height=res_y, bg="#000000")
    canvas.pack()
    self.img = tkinter.PhotoImage(width=res_x, height=res_y)
    canvas.create_image((res_x/2, res_y/2), image=self.img, state="normal")
    with locate_ns() as ns:
        mandels = ns.yplookup(meta_any={"class:mandelbrot_calc_color"})
        mandels = list(mandels.items())
    print("{0} mandelbrot calculation servers found.".format(len(mandels)))
    if not mandels:
        raise ValueError("launch at least one mandelbrot calculation server before starting this")
    self.mandels = [uri for _, (uri, meta) in mandels]
    self.pool = futures.ThreadPoolExecutor(max_workers=len(self.mandels))
    self.tasks = []
    self.start_time = time.time()
    for line in range(res_y):
        self.tasks.append(self.calc_new_line(line))
    self.root.after(100, self.draw_results)
    tkinter.mainloop()
Example 14: screen
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def screen(self, start, width):
    dr = width / self.res_x
    di = dr * (self.res_x / self.res_y)
    di *= 0.8   # aspect ratio correction
    self.result = ["?"] * self.res_y
    servers = [BatchProxy(Proxy(uri)) for uri in self.mandels]
    with futures.ThreadPoolExecutor(max_workers=len(servers)*2) as pool:
        for i in range(self.res_y):
            server = servers[i % len(servers)]
            server.calc_line(start, self.res_x, i*di, dr, i)
        tasks = [pool.submit(server) for server in servers]
        for task in futures.as_completed(tasks):
            lines = task.result()
            for (linenr, line) in lines:
                self.result[linenr] = line
    return "\n".join(self.result)
Example 15: run
# Required module: from concurrent import futures [as alias]
# Or: from concurrent.futures import ThreadPoolExecutor [as alias]
def run(self, concurrent=10):
    """
    Entry point.

    :param concurrent: number of threads to use
    :return: message json
    """
    children = [self.stac_file]
    logger.info(f"Using {concurrent} threads")
    while True:
        with futures.ThreadPoolExecutor(max_workers=int(concurrent)) as executor:
            future_tasks = [executor.submit(self._validate, url) for url in children]
            children = []
            for task in futures.as_completed(future_tasks):
                message, status, new_children = task.result()
                self.status = self._update_status(self.status, status)
                self.message.append(message)
                children.extend(new_children)
        if not children:
            break
    return json.dumps(self.message)