This article collects typical usage examples of Python's multiprocessing.pool close method. If you have been wondering what pool.close does, how to call it, and what real code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples for multiprocessing.pool, the module in which the method lives.
The following shows 15 code examples of the pool.close method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
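Before the examples, here is a minimal sketch of the canonical pattern, assuming nothing beyond the standard library: close() tells the pool to accept no more tasks, and join() then waits for the worker processes to exit. The square helper is hypothetical and used only for illustration.

import multiprocessing

def square(x):
    # trivial worker function, used only to have something to map over
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    results = pool.map(square, range(10))
    pool.close()   # no new tasks may be submitted after this point
    pool.join()    # wait for all worker processes to finish
    print(results)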
Example 1: test_no_import_lock_contention
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_no_import_lock_contention(self):
    with test.support.temp_cwd():
        module_name = 'imported_by_an_imported_module'
        with open(module_name + '.py', 'w') as f:
            f.write("""if 1:
                import multiprocessing

                q = multiprocessing.Queue()
                q.put('knock knock')
                q.get(timeout=3)
                q.close()
                del q
            """)

            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")
Example 2: test_unpickleable_result
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_unpickleable_result(self):
    from multiprocessing.pool import MaybeEncodingError
    p = multiprocessing.Pool(2)

    # Make sure we don't lose pool processes because of encoding errors.
    for iteration in range(20):

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(unpickleable_result, error_callback=errback)
        self.assertRaises(MaybeEncodingError, res.get)
        wrapped = scratchpad[0]
        self.assertTrue(wrapped)
        self.assertIsInstance(scratchpad[0], MaybeEncodingError)
        self.assertIsNotNone(wrapped.exc)
        self.assertIsNotNone(wrapped.value)

    p.close()
    p.join()
Example 3: test_pool_worker_lifetime_early_close
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_pool_worker_lifetime_early_close(self):
    # Issue #10332: closing a pool whose workers have limited lifetimes
    # before all the tasks completed would make join() hang.
    p = multiprocessing.Pool(3, maxtasksperchild=1)
    results = []
    for i in range(6):
        results.append(p.apply_async(sqr, (i, 0.3)))
    p.close()
    p.join()
    # check the results
    for (j, res) in enumerate(results):
        self.assertEqual(res.get(), sqr(j))

#
# Test of creating a customized manager class
#
Example 4: test_large_fd_transfer
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_large_fd_transfer(self):
    # With fd > 256 (issue #11657)
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
    p.daemon = True
    p.start()
    self.addCleanup(test.support.unlink, test.support.TESTFN)
    with open(test.support.TESTFN, "wb") as f:
        fd = f.fileno()
        for newfd in range(256, MAXFD):
            if not self._is_fd_assigned(newfd):
                break
        else:
            self.fail("could not find an unassigned large file descriptor")
        os.dup2(fd, newfd)
        try:
            reduction.send_handle(conn, newfd, p.pid)
        finally:
            os.close(newfd)
    p.join()
    with open(test.support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"bar")
Example 5: _listener
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def _listener(cls, conn, families):
    for fam in families:
        l = cls.connection.Listener(family=fam)
        conn.send(l.address)
        new_conn = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

    l = socket.socket()
    l.bind((test.support.HOST, 0))
    l.listen()
    conn.send(l.getsockname())
    new_conn, addr = l.accept()
    conn.send(new_conn)
    new_conn.close()
    l.close()

    conn.recv()
Example 6: test_timeout
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_timeout(self):
    old_timeout = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(0.1)
        parent, child = multiprocessing.Pipe(duplex=True)
        l = multiprocessing.connection.Listener(family='AF_INET')
        p = multiprocessing.Process(target=self._test_timeout,
                                    args=(child, l.address))
        p.start()
        child.close()
        self.assertEqual(parent.recv(), 123)
        parent.close()
        conn = l.accept()
        self.assertEqual(conn.recv(), 456)
        conn.close()
        l.close()
        p.join(10)
    finally:
        socket.setdefaulttimeout(old_timeout)

#
# Test what happens with no "if __name__ == '__main__'"
#
Example 7: get_high_socket_fd
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def get_high_socket_fd(self):
    if WIN32:
        # The child process will not have any socket handles, so
        # calling socket.fromfd() should produce WSAENOTSOCK even
        # if there is a handle of the same number.
        return socket.socket().detach()
    else:
        # We want to produce a socket with an fd high enough that a
        # freshly created child process will not have any fds as high.
        fd = socket.socket().detach()
        to_close = []
        while fd < 50:
            to_close.append(fd)
            fd = os.dup(fd)
        for x in to_close:
            os.close(x)
        return fd
Example 8: test_ignore
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_ignore(self):
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        self.assertEqual(conn.recv(), 'ready')
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        conn.send(1234)
        self.assertEqual(conn.recv(), 1234)
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        self.assertEqual(conn.recv_bytes(), b'x'*(1024*1024))
        time.sleep(0.1)
        p.join()
    finally:
        conn.close()
Example 9: test_ignore_listener
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def test_ignore_listener(self):
    conn, child_conn = multiprocessing.Pipe()
    try:
        p = multiprocessing.Process(target=self._test_ignore_listener,
                                    args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()
        address = conn.recv()
        time.sleep(0.1)
        os.kill(p.pid, signal.SIGUSR1)
        time.sleep(0.1)
        client = multiprocessing.connection.Client(address)
        self.assertEqual(client.recv(), 'welcome')
        p.join()
    finally:
        conn.close()
Example 10: _listener
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def _listener(cls, conn, families):
    for fam in families:
        l = cls.connection.Listener(family=fam)
        conn.send(l.address)
        new_conn = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

    l = socket.socket()
    l.bind((test.support.HOST, 0))
    l.listen(1)
    conn.send(l.getsockname())
    new_conn, addr = l.accept()
    conn.send(new_conn)
    new_conn.close()
    l.close()

    conn.recv()
Example 11: scrape_pages
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def scrape_pages(pages):
    for page in pages:
        make_transient(page)

    # free up the connection while doing net IO
    db.session.close()
    db.engine.dispose()

    pool = get_worker_pool()
    map_results = pool.map(scrape_with_timeout, pages, chunksize=1)
    scraped_pages = [p for p in map_results if p]
    logger.info(u'finished scraping all pages')
    pool.close()
    pool.join()

    logger.info(u'preparing update records')
    row_dicts = [x.__dict__ for x in scraped_pages]
    for row_dict in row_dicts:
        row_dict.pop('_sa_instance_state')

    logger.info(u'saving update records')
    db.session.bulk_update_mappings(PageNew, row_dicts)

    scraped_page_ids = [p.id for p in scraped_pages]
    return scraped_page_ids

# need to spawn processes from workers but can't do that if worker is daemonized
Example 12: scrape_with_timeout
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def scrape_with_timeout(page):
    pool = NDPool(processes=1)
    async_result = pool.apply_async(scrape_page, (page,))
    result = None
    try:
        result = async_result.get(timeout=600)
        pool.close()
    except TimeoutError:
        logger.info(u'page scrape timed out: {}'.format(page))
        pool.terminate()

    pool.join()

    return result
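The NDPool used in Example 12 is not a standard-library class; as the comment at the end of Example 11 hints, it is typically a Pool whose workers are non-daemonic so that they are allowed to spawn child processes of their own. A rough sketch of such a class is shown below, assuming Python 3.8+; the names NoDaemonProcess and NoDaemonContext are hypothetical, and the real project may define NDPool differently.

import multiprocessing
import multiprocessing.pool

class NoDaemonProcess(multiprocessing.Process):
    # always report daemon=False and silently ignore attempts to change it,
    # so that pool workers may create child processes of their own
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass

class NoDaemonContext(type(multiprocessing.get_context())):
    Process = NoDaemonProcess

class NDPool(multiprocessing.pool.Pool):
    # a Pool whose workers are created through the non-daemonic context
    def __init__(self, *args, **kwargs):
        kwargs['context'] = NoDaemonContext()
        super().__init__(*args, **kwargs)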
Example 13: _fetch_stock_data
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def _fetch_stock_data(self, stock_list):
    """Fetch quote data for the given stocks."""
    pool = multiprocessing.pool.ThreadPool(len(stock_list))
    try:
        res = pool.map(self.get_stocks_by_range, stock_list)
    finally:
        pool.close()
    return [d for d in res if d is not None]
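The try/finally in Example 13 is similar in spirit to, but not the same as, using the pool as a context manager: Pool.__exit__ calls terminate() rather than close(), so any still-pending work is stopped instead of drained. A small sketch of the context-manager form is shown below; get_quotes is a hypothetical worker standing in for self.get_stocks_by_range.

from multiprocessing.pool import ThreadPool

def get_quotes(symbol):
    # hypothetical worker; a real implementation would query a data source
    return {'symbol': symbol, 'price': 0.0}

def fetch_stock_data(stock_list):
    # __exit__ calls terminate(); keep an explicit close()/join() instead if
    # remaining tasks must be allowed to finish
    with ThreadPool(len(stock_list)) as pool:
        res = pool.map(get_quotes, stock_list)
    return [d for d in res if d is not None]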
Example 14: as_bulk_resolve
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def as_bulk_resolve(candidates, threads=50):
    """
    Resolve a list of IPs to AS information.

    Returns a map of each result as a tuple of (ASN, owner) keyed to
    its candidate.  Returns None if no ASN could be found or (ASN,
    None) if an ASN was found but no owner is available.

    WARNING: This function will create a pool of up to 'threads'
    threads.
    """
    result = {}

    if not candidates:
        return result

    pool = multiprocessing.pool.ThreadPool(
        processes=min(len(candidates), threads))

    for ip, as_ in pool.imap(
            __asresolve__,
            candidates,
            chunksize=1):
        result[ip] = as_
    pool.close()
    return result
Example 15: run
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import close [as alias]
def run(self):
    # close the parent's end of the pipe inside the child process
    self.parent_conn.close()
    for s in iter(self.child_conn.recv, None):
        self.child_conn.send(s.upper())
    self.child_conn.close()