This article collects typical usage examples of the Python method multiprocessing.dummy.Pool.terminate. If you are wondering what Pool.terminate does, how to call it, or what it looks like in practice, the curated code samples below may help. You can also read further about its containing class, multiprocessing.dummy.Pool.
The following shows 15 code examples of Pool.terminate, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
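Before diving into the examples, here is a minimal self-contained sketch of the method's basic semantics: terminate() stops the worker threads immediately and discards pending tasks, unlike close() followed by join(), which waits for outstanding work to finish. The work function and timings below are illustrative assumptions, not taken from any example on this page.

from multiprocessing.dummy import Pool  # thread-backed Pool, same API as multiprocessing.Pool
import time

def work(x):
    # Illustrative task: pretend each item takes one second.
    time.sleep(1)
    return x * 2

pool = Pool(4)
pool.map_async(work, range(8))
time.sleep(0.5)
pool.terminate()  # stop the workers now; tasks still queued are dropped
pool.join()       # always join after terminate before discarding the pool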
Example 1: abortable_func
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import TimeoutError
from functools import partial

def abortable_func(func, *args, **kwargs):
    """
    abortable_func is a wrapper that calls the function "func" in a
    background thread (via multiprocessing.dummy) and terminates it if it
    has not returned after "timeout" seconds.
    Inspired by
    http://stackoverflow.com/questions/29494001/how-can-i-abort-a-task-in-a-multiprocessing-pool-after-a-timeout
    but improves on the original solution, which only works for functions
    that take positional arguments.
    Parameters:
    func - the function to call; it is terminated if it does not return within "timeout" seconds
    *args - positional arguments of "func"
    **kwargs - keyword arguments of "func", plus the "timeout" value
    """
    # Get the "timeout" value and create a ThreadPool (multiprocessing.dummy.Pool)
    # with only 1 worker.
    # Use functools.partial (https://docs.python.org/3/library/functools.html)
    # to fit all the arguments of func into the interface of Pool.apply_async.
    timeout = kwargs.pop('timeout', None)
    p = ThreadPool(1)
    partial_func = partial(func, **kwargs)
    res = p.apply_async(partial_func, args)
    # Terminate the thread if it does not return within "timeout" seconds,
    # otherwise return func's return value.
    try:
        out = res.get(timeout)
        return out
    except TimeoutError:
        p.terminate()
        return '{}:Timeout exceeded. Process terminated.\r\n'.format(args[0])
Example 2: main
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def main():
    # create_db, fill, read and read_one are benchmark helpers defined
    # elsewhere in the source module.
    n = 1000000   # rows to insert
    m = 1         # full-table reads
    m2 = 10000    # single-row reads
    m3 = 100      # interleave one insert every m3 single-row reads
    create_db()
    pool = Pool(processes=5)
    start = time.time()
    fill(n)
    fill_time = time.time() - start
    print('{} inserts in {}s'.format(n, fill_time))
    start = time.time()
    results = []
    for _ in range(m):
        results.append(pool.apply_async(read, ()))
        # results.append(pool.apply_async(read_dataset, ()))
    for i in range(m2):
        results.append(pool.apply_async(read_one, ()))
        if i % m3 == 0:
            results.append(pool.apply_async(fill, (1,)))
    for r in results:
        r.get(timeout=1000000)
    read_time = time.time() - start
    pool.terminate()
    print('{}.{} reads in {}s'.format(m, m2, read_time))
Example 3: get_taotu_pages
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def get_taotu_pages(category_url):
    # Find all pagination URLs under a category
    print('process category: {0}'.format(category_url))
    soup = commons.soup(category_url, encoding='utf8')
    print('process index: {0}'.format(soup.title))
    last_no = get_last_page_no(soup)
    urls = ['{0}/list_{1}.html'.format(category_url, i) for i in range(2, last_no + 1)]
    # for url in urls:
    #     download_by_page(url)
    retry = 0
    while True:
        pool = ThreadPool(4)
        try:
            pool.map(download_by_page, urls)
            pool.close()
            pool.join()
            print('all images downloaded completely.')
            break
        except KeyboardInterrupt as e:
            print('download terminated by user, quit now.', e)
            pool.terminate()
            pool.join()
            break
        except Exception as e:
            pool.terminate()
            pool.join()
            retry += 1
            traceback.print_exc()
            try:
                print('download error: {0}, {1} retry in {2}s'.format(
                    e, retry, retry * 20 % 120))
            except Exception:
                pass
            time.sleep(retry * 20 % 120)
Example 4: put_from_manifest
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def put_from_manifest(
    s3_bucket,
    s3_connection_host,
    s3_ssenc,
    s3_base_path,
    aws_access_key_id,
    aws_secret_access_key,
    manifest,
    bufsize,
    concurrency=None,
    incremental_backups=False,
):
    """
    Uploads the files listed in a manifest to Amazon S3.
    To support files larger than 5GB, multipart upload is used (60MB chunks).
    Files are uploaded compressed with lzop; the .lzo suffix is appended.
    """
    bucket = get_bucket(s3_bucket, aws_access_key_id, aws_secret_access_key, s3_connection_host)
    manifest_fp = open(manifest, "r")
    buffer_size = int(bufsize * MBFACTOR)
    files = manifest_fp.read().splitlines()
    pool = Pool(concurrency)
    # Drain the imap iterator so every upload actually runs; the results
    # themselves are not needed here.
    for _ in pool.imap(
        upload_file, ((bucket, f, destination_path(s3_base_path, f), s3_ssenc, buffer_size) for f in files)
    ):
        pass
    pool.terminate()
    if incremental_backups:
        for f in files:
            os.remove(f)
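The shape of this pattern, fully draining pool.imap and then calling terminate() as a quick shutdown once all work is done, is independent of S3. A minimal sketch with a stub worker standing in for upload_file (the tuple layout here is simplified, an assumption for illustration):

from multiprocessing.dummy import Pool

def process_item(task):
    # Stub worker; the real upload_file compresses and uploads to S3.
    name, size = task
    return '{} ({} bytes)'.format(name, size)

items = [('a.db', 10), ('b.db', 20)]
pool = Pool(2)
# imap is lazy: iterating the result is what makes every task run.
for _ in pool.imap(process_item, ((name, size) for name, size in items)):
    pass
pool.terminate()  # safe here because the iterator was fully consumed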
Example 5: put_from_manifest
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def put_from_manifest(
        s3_bucket, s3_connection_host, s3_ssenc, s3_base_path,
        aws_access_key_id, aws_secret_access_key, manifest,
        bufsize, reduced_redundancy, rate_limit, concurrency=None, incremental_backups=False):
    """
    Uploads the files listed in a manifest to Amazon S3.
    To support files larger than 5GB, multipart upload is used (60MB chunks).
    Files are uploaded compressed with lzop; the .lzo suffix is appended.
    """
    exit_code = 0
    bucket = get_bucket(
        s3_bucket, aws_access_key_id,
        aws_secret_access_key, s3_connection_host)
    manifest_fp = open(manifest, 'r')
    buffer_size = int(bufsize * MBFACTOR)
    files = manifest_fp.read().splitlines()
    pool = Pool(concurrency)
    # Note that f is reused: inside the generator it is a manifest path,
    # while the loop binds it to upload_file's result (the uploaded path
    # on success, None on failure).
    for f in pool.imap(upload_file,
                       ((bucket, f, destination_path(s3_base_path, f), s3_ssenc, buffer_size, reduced_redundancy, rate_limit) for f in files if f)):
        if f is None:
            # Upload failed.
            exit_code = 1
        elif incremental_backups:
            # Delete files that were successfully uploaded.
            os.remove(f)
    pool.terminate()
    exit(exit_code)
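For readability, here is a stub that matches how the loop above interprets results, returning the source path on success and None on failure. This shape is an assumption inferred from the calling code, not the project's actual upload_file:

def upload_file(task):
    (bucket, source_path, dest_path, ssenc,
     bufsize, reduced_redundancy, rate_limit) = task
    try:
        # ... compress with lzop and multipart-upload to S3 here ...
        return source_path  # success: the caller may delete the local file
    except Exception:
        return None         # failure: the caller sets a nonzero exit code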
Example 6: dowload_all
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def dowload_all(by_page=False):
    # Download the images for every tag on the site
    items = range(1, 145) if by_page else get_all_tags()
    retry = 0
    while True:
        pool = ThreadPool(4)
        try:
            pool.map(download_by_page if by_page else download_by_tag, items)
            pool.close()
            pool.join()
            print('all images are downloaded completely.')
            break
        except KeyboardInterrupt as e:
            print('download terminated by user, quit now.', e)
            pool.terminate()
            pool.join()
            break
        except Exception as e:
            pool.terminate()
            pool.join()
            retry += 1
            traceback.print_exc()
            try:
                print('download error: {0}, {1} retry in {2}s'.format(
                    e, retry, retry * 20 % 120))
            except Exception:
                pass
            time.sleep(retry * 20 % 120)
Example 7: run
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def run():
    t = [
        ('users', User().create),
        ('forums', Forum().create),
        ('threads', Thread().create),
        ('posts', Post().create),
        ('followers', User().follow),
        ('subscribptions', Thread().subscribe),
    ]
    for entity, factory in t:
        entities = [True for i in range(int(settings[entity]))]
        num_tasks = len(entities)
        pool = ThreadPool(int(settings['num_threads']))
        try:
            progress = list(range(5, 105, 5))  # remaining 5% milestones
            for i, _ in enumerate(pool.imap(factory, entities)):
                perc = i * 100 // num_tasks
                if perc % 5 == 0 and perc in progress:
                    log.print_out('Creating %s: %d%% done' % (entity, perc))
                    progress.remove(perc)
            pool.close()
            pool.join()
        except Exception as e:
            print(e)
            pool.terminate()
            sys.exit(1)
Example 8: run
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def run(self):
    in_queue, out_queue = Queue(), Queue()
    for i in self.a:
        in_queue.put(i)

    def f(in_queue, out_queue):
        # Worker loop: passed as the pool's initializer below, so each of
        # the four threads runs it and drains in_queue cooperatively.
        while not in_queue.empty():
            time.sleep(1)
            out_queue.put(in_queue.get() + 1)

    pool = Pool(4, f, (in_queue, out_queue))
    self.b = []
    # Busy-wait until every input has produced an output.
    while len(self.b) < len(self.a):
        if not out_queue.empty():
            self.b.append(out_queue.get())
    pool.terminate()
Example 9: download_pages
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def download_pages(items):
    retry = 0
    while retry < 10:
        pool = ThreadPool(4)
        try:
            pool.map(download_page, items)
            pool.close()
            pool.join()
            break
        except KeyboardInterrupt:
            print('download terminated by user, quit execution.')
            pool.terminate()
            break
        except Exception as e:
            pool.terminate()
            retry += 1
            print('download error occurred: {0}, {1} retry in {2}s'.format(
                e, retry, retry * 10))
            time.sleep(retry * 10)
Example 10: core
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def core():
    logging.basicConfig(level=logging.WARNING)
    # check()
    prepareprocess()
    pool = ThreadPool(20)
    time1 = datetime.now()
    timeend = time1 + timedelta(minutes=2)
    # Keep firing rush() tasks for two minutes, then shut the pool down.
    while True:
        time2 = datetime.now()
        if time2 > timeend:
            break
        for i in classtorush:
            pool.apply_async(rush, args=(i,))
            time.sleep(0.25)
        time.sleep(10)
    pool.terminate()
    pool.join()
Example 11: process_parsing
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def process_parsing(url, output_stream):
    # url = 'http://www.yell.ru/spb/top/restorany/'
    time_start = time.time()
    url_prefix = 'http://www.yell.ru'
    r = requests.get(url)
    metro_urls = get_metro_stations(r.text)
    logging.info("metroes: %d" % len(metro_urls))
    # Collect company URLs for parsing
    pool = ThreadPool(NUMBER_PROCESSES)
    res_queue = ProcessQueue()
    results = pool.map(collect_company_urls, [(url_prefix + u, res_queue) for u in metro_urls])
    pool.close()
    pool.join()
    pool.terminate()
    # Deduplicate the collected URLs
    count = 0
    reduced_url_set = set()
    while not res_queue.empty():
        count = count + 1
        url = res_queue.get_nowait()
        # FOR TEST ONLY - REMOVE THIS
        # if count < 10:
        reduced_url_set.add(url)
    logging.info("%d" % count)
    logging.info("%d" % len(reduced_url_set))
    # Start the company parsing pool
    logging.info('start!!!!!!')
    pool = ThreadPool(NUMBER_PROCESSES)
    for field in FIELDS:
        output_stream.write(field + ';')
    output_stream.write('\n')
    results = pool.map(parse_company_worker, [(url_prefix + u, output_stream) for u in reduced_url_set])
    pool.close()
    pool.join()
    pool.terminate()
    logging.info('done!!!!!')
    logging.info('finished in %s seconds' % (time.time() - time_start))
Example 12: execute_nodes
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def execute_nodes(self):
    num_threads = self.config.threads
    target_name = self.config.target_name
    text = "Concurrency: {} threads (target='{}')"
    concurrency_line = text.format(num_threads, target_name)
    dbt.ui.printer.print_timestamped_line(concurrency_line)
    dbt.ui.printer.print_timestamped_line("")
    pool = ThreadPool(num_threads)
    try:
        self.run_queue(pool)
    except KeyboardInterrupt:
        pool.close()
        pool.terminate()
        adapter = get_adapter(self.config)
        if not adapter.is_cancelable():
            msg = ("The {} adapter does not support query "
                   "cancellation. Some queries may still be "
                   "running!".format(adapter.type()))
            yellow = dbt.ui.printer.COLOR_FG_YELLOW
            dbt.ui.printer.print_timestamped_line(msg, yellow)
            raise
        for conn_name in adapter.cancel_open_connections():
            dbt.ui.printer.print_cancel_line(conn_name)
        pool.join()
        dbt.ui.printer.print_run_end_messages(self.node_results,
                                              early_exit=True)
        raise
    pool.close()
    pool.join()
    return self.node_results
Example 13: _fetch_photos_multi
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def _fetch_photos_multi(self):
    rows = self.db.get_photo_status()
    if not rows:
        print("No photos in {0}'s album".format(self.target_id))
        return
    photos = []
    for row in rows:
        photos.append(json.loads(row['data']))
    count = len(photos)
    print('Downloading photos {0}-{1} ...'.format(
        self.photo_total, self.photo_total + count))
    pool = ThreadPool(8)
    try:
        pool.map(self._download_photo, photos)
        pool.close()
        pool.join()
        self.photo_total += count
    except KeyboardInterrupt:
        pool.terminate()
Example 14: ParallelRunner
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
class ParallelRunner(SkeletonRunner):
    """
    This class evaluates a skeleton through parallel primitives.
    The runner executes skeleton evaluation on the parallel architecture,
    using all available processing elements: the skeleton is evaluated
    with different input elements in parallel and the results are then
    joined.
    """
    def __init__(self):
        self.pool = Pool(processes=36)

    def __del__(self):
        self.pool.close()
        self.pool.terminate()

    def run(self, skeleton, values, *params):
        results = self.pool.map(eval_parallel(skeleton, params[1], params[0]), values)
        self.pool.close()
        return results
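A usage sketch for the runner above. my_skeleton, param0, and param1 are hypothetical placeholders, since the skeleton type and eval_parallel come from the surrounding framework:

runner = ParallelRunner()
# run() maps eval_parallel(skeleton, params[1], params[0]) over the inputs,
# so the order of the two extra parameters is significant.
results = runner.run(my_skeleton, [1, 2, 3], param0, param1)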
Example 15: fetch_or_load_urls
# Required import: from multiprocessing.dummy import Pool [as alias]
# Or: from multiprocessing.dummy.Pool import terminate [as alias]
def fetch_or_load_urls(fileanme):
    # jsonfile and urls are presumably module-level names defined elsewhere
    # in the source.
    if os.path.exists(jsonfile):
        return json.load(open(jsonfile, 'r'))
    pool = ThreadPool(8)
    try:
        pool.map(findurls, range(1, 51))
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        print('terminated by user.')
        pool.terminate()
    print(len(urls))
    # Python 2 only: cmp= was removed in Python 3
    # (use key=functools.cmp_to_key(url_cmp) there).
    items = [url_to_item(url) for url in sorted(urls, cmp=url_cmp)]
    # json.dump(items, open('urls.json', 'w'), indent=2)  # writes \uxxxx escapes
    json.dump(items, codecs.open(fileanme, 'w', 'utf8'),  # writes Chinese characters as-is
              ensure_ascii=False, indent=2)
    return items