This article collects typical usage examples of Python's multiprocessing.pool.ThreadPool. If you have been wondering what pool.ThreadPool does, how to use it, or what working examples look like, the curated code samples below should help. You can also read further about the containing module, multiprocessing.pool.
The sections below present 15 code examples of pool.ThreadPool, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
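Before diving into the project excerpts, here is a minimal, self-contained sketch of the basic pattern most of them follow. The fetch helper and the URL list are illustrative placeholders, not taken from any of the projects below:

from multiprocessing.pool import ThreadPool

def fetch(url):
    # Stand-in for I/O-bound work such as an HTTP request.
    return len(url)

urls = ['https://example.com/a', 'https://example.com/b']
with ThreadPool(4) as pool:          # ThreadPool supports the context-manager protocol
    results = pool.map(fetch, urls)  # map() blocks until every item has been processed
print(results)                       # e.g. [21, 21]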
Example 1: save_features
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def save_features(model, data_loaders, args):
    model.eval()
    os.makedirs(args.features_dir, exist_ok=True)
    thread_pool = pool.ThreadPool(args.workers)
    for data_loader in data_loaders:
        data_index = 0
        for input, target, prev_absolutes, next_absolutes, _ in data_loader:
            # Legacy PyTorch API: cuda(async=...) and volatile= were removed in
            # PyTorch 0.4+ (use non_blocking=True and torch.no_grad() instead),
            # and `async` is a reserved keyword since Python 3.7.
            input = Variable(input.cuda(async=True), volatile=True)
            features = model.feats(input).data.cpu()
            features_to_save = []
            for feature in features:
                relpath = data_loader.dataset.get_relpath(data_index)
                feature_path = os.path.join(args.features_dir,
                                            relpath + '.pytar')
                features_to_save.append((feature, feature_path))
                data_index += 1
            # Write this batch's feature tensors to disk in parallel.
            thread_pool.map(_save_tensor, features_to_save)
Example 2: migrate
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def migrate(pool, from_connection, to_connection):
    """
    Migrate tool for pyspider
    """
    f = connect_database(from_connection)
    t = connect_database(to_connection)
    if isinstance(f, ProjectDB):
        for each in f.get_all():
            each = unicode_obj(each)
            logging.info("projectdb: %s", each['name'])
            t.drop(each['name'])
            t.insert(each['name'], each)
    elif isinstance(f, TaskDB):
        pool = Pool(pool)
        pool.map(
            lambda x, f=from_connection, t=to_connection: taskdb_migrating(x, f, t),
            f.projects)
    elif isinstance(f, ResultDB):
        pool = Pool(pool)
        pool.map(
            lambda x, f=from_connection, t=to_connection: resultdb_migrating(x, f, t),
            f.projects)
Example 3: set_compression_pool_size
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def set_compression_pool_size(pool_size):
    """
    Set the size of the compression workers thread pool.

    If the pool is already created, it waits until all jobs are finished, and then proceeds with setting the new size.

    Parameters
    ----------
    pool_size : `int`
        The size of the pool (must be a positive integer)

    Returns
    -------
    `None`
    """
    pool_size = int(pool_size)
    if pool_size < 1:
        raise ValueError("The compression thread pool size cannot be of size {}".format(pool_size))

    global _compress_thread_pool
    if _compress_thread_pool is not None:
        # close() stops new submissions; join() blocks until running jobs finish.
        _compress_thread_pool.close()
        _compress_thread_pool.join()
    _compress_thread_pool = ThreadPool(pool_size)
Example 4: test_multi_thread_string_io_read_csv
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def test_multi_thread_string_io_read_csv(all_parsers):
    # see gh-11786
    parser = all_parsers
    max_row_range = 10000
    num_files = 100

    bytes_to_df = [
        "\n".join(
            ["%d,%d,%d" % (i, i, i) for i in range(max_row_range)]
        ).encode() for _ in range(num_files)]
    files = [BytesIO(b) for b in bytes_to_df]

    # Read all files in many threads.
    pool = ThreadPool(8)
    results = pool.map(parser.read_csv, files)
    first_result = results[0]

    for result in results:
        tm.assert_frame_equal(first_result, result)
Example 5: destroy_vms
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def destroy_vms(self, auth):
    """
    Destroy all the VMs
    """
    delete_list = list(reversed(self.get_vm_list()))
    exceptions = []
    if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
        pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
        pool.map(
            lambda vm: vm.delete(delete_list, auth, exceptions),
            delete_list
        )
        pool.close()
    else:
        # If IM server is the first VM, then it will be the last destroyed
        for vm in delete_list:
            vm.delete(delete_list, auth, exceptions)

    if exceptions:
        msg = ""
        for e in exceptions:
            msg += str(e) + "\n"
        raise Exception("Error destroying the infrastructure: \n%s" % msg)
Example 6: decode
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def decode(self, session, low=0, high=100000, num_thread=10):
    corpus_info = session.corpus_info()
    high = min(corpus_info["num_sentences"] - 1, high)
    if low >= high:
        return
    t0 = time.time()
    if num_thread > 1:
        # The original excerpt hard-coded Pool(10) here, silently ignoring
        # the num_thread argument; use the parameter instead.
        with Pool(num_thread) as p:
            p.map(
                partial(self._decode_one, session),
                [sent_id for sent_id in range(low, high + 1)]
            )
    else:
        for sent_id in range(low, high + 1):
            self._decode_one(session, sent_id)
    print(f'Finished {low} to {high} in {time.time() - t0}s')
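A short hedged aside on the idiom above: functools.partial binds the session argument once, so map() only has to supply the varying sentence id. A minimal sketch, where decode_one is a hypothetical stand-in for self._decode_one:

from functools import partial
from multiprocessing.pool import ThreadPool

def decode_one(session, sent_id):
    # Hypothetical stand-in for the real per-sentence decoding work.
    return '%s:%d' % (session, sent_id)

with ThreadPool(4) as p:
    print(p.map(partial(decode_one, 'session-1'), range(3)))
    # ['session-1:0', 'session-1:1', 'session-1:2']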
Example 7: test_multithread_stringio_read_csv
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def test_multithread_stringio_read_csv(self):
    # see gh-11786
    max_row_range = 10000
    num_files = 100

    bytes_to_df = [
        '\n'.join(
            ['%d,%d,%d' % (i, i, i) for i in range(max_row_range)]
        ).encode() for j in range(num_files)]
    files = [BytesIO(b) for b in bytes_to_df]

    # read all files in many threads
    pool = ThreadPool(8)
    results = pool.map(self.read_csv, files)
    first_result = results[0]

    for result in results:
        tm.assert_frame_equal(first_result, result)
Example 8: downloadAllFuturesDailyBar
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def downloadAllFuturesDailyBar(self):
    """Download daily bars for the dominant contracts of all futures products"""
    start = time()
    print(u'Started downloading daily bars for all dominant futures contracts')

    productSymbolSet = self.readFuturesProductSymbol()
    print(u'Symbol list loaded successfully, product symbols: %s' % productSymbolSet)

    # A thread pool was also tried here, but possibly because the download
    # function involves a lot of data-format conversion and is CPU-heavy,
    # multithreading brought no significant speedup.
    #p = ThreadPool(10)
    #p.map(self.downloadFuturesDailyBar, productSymbolSet)
    #p.close()
    #p.join()

    for productSymbol in productSymbolSet:
        self.downloadFuturesDailyBar(productSymbol + '0000')

    print(u'Daily bars for all dominant futures contracts downloaded, took %s seconds' % (time() - start))

#----------------------------------------------------------------------
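The commented-out pool above illustrates a common pitfall: CPython's GIL means a ThreadPool speeds up I/O-bound work but not CPU-bound work such as the data-format conversion described in the comment. A hedged sketch of the usual alternative, a process pool (the convert helper is a hypothetical stand-in, not part of the project):

from multiprocessing import Pool as ProcessPool

def convert(symbol):
    # Hypothetical stand-in for the CPU-heavy data-format conversion.
    return symbol + '0000'

if __name__ == '__main__':
    # Worker processes each run their own interpreter, so the GIL
    # no longer serializes the CPU-bound work.
    with ProcessPool(4) as p:
        print(p.map(convert, ['IF', 'RB', 'CU']))  # ['IF0000', 'RB0000', 'CU0000']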
Example 9: download
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def download(self, batch):
    if self.driver_pool_size:
        pool = Pool(processes=self.driver_pool_size)
    else:
        pool = Pool(processes=default_settings.DRIVER_POOL_SIZE)

    results = []
    for request in batch:
        results.append(pool.apply_async(self.download_one, (request,)))
    pool.close()
    pool.join()

    true_responses = []
    for result in results:
        true_response = result.get()
        true_responses.append(true_response)
        FetchManLogger.logger.info(true_response)
    return true_responses
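A brief hedged aside on the pattern above: apply_async returns AsyncResult handles immediately, and calling .get() after close()/join() collects the values (and re-raises any exception raised inside a worker). A minimal self-contained sketch, with work standing in for download_one:

from multiprocessing.pool import ThreadPool

def work(x):
    return x * x  # stand-in for downloading one request

pool = ThreadPool(4)
async_results = [pool.apply_async(work, (i,)) for i in range(5)]
pool.close()  # no more tasks may be submitted
pool.join()   # wait for all submitted tasks to finish
print([r.get() for r in async_results])  # [0, 1, 4, 9, 16]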
Example 10: LaunchDaskDistributedClient
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def LaunchDaskDistributedClient(self, scheduler_ip=None, scheduler_port=None):
    if self.parallel and self.parallel_model == "dask" and self.is_dask_scheduler_initialised is False:
        from multiprocessing.pool import ThreadPool
        try:
            import dask
            from dask.distributed import Client, LocalCluster
        except ImportError:
            raise ImportError("dask is not installed. Install it using 'pip install dask[complete]'")

        dask.config.set(pool=ThreadPool(self.no_of_cpu_cores))

        # INITIALISE CLUSTER
        if scheduler_ip is None:
            cluster = LocalCluster(n_workers=self.no_of_cpu_cores, processes=False, threads_per_worker=None)
            client = Client(cluster)
        else:
            client = Client(scheduler_ip)

        self.dask_client = client
        self.is_dask_scheduler_initialised = True
Example 11: __init__
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def __init__(self, n, probe_key, ignore_clock_skew=False, metadata_encoding=None, disable_action_probes=False):
    # Each QR code takes about 1ms (and updates at 5fps). We do
    # our best to ensure the QR is processed in time for the next
    # step call (n/16 would put us right at the threshold).
    self.pool = pool.ThreadPool(max(int(n/4), 1))
    self.qr_pool = pool.ThreadPool(max(int(n/8), 1))
    self.lock = threading.RLock()

    self.instance_n = [None] * n
    self.ignore_clock_skew = ignore_clock_skew
    self.disable_action_probes = disable_action_probes
    self.metadata_encoding = metadata_encoding

    self.update(probe_key=probe_key, metadata_encoding=metadata_encoding)
    # only used in flashgames right now
Example 12: _check_dataset_file_median_size
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def _check_dataset_file_median_size(url_list):
    fs, path_list = get_filesystem_and_path_or_paths(url_list)
    RECOMMENDED_FILE_SIZE_BYTES = 50 * 1024 * 1024

    # TODO: also check file size for other file systems.
    if isinstance(fs, LocalFileSystem):
        pool = ThreadPool(64)
        try:
            file_size_list = pool.map(os.path.getsize, path_list)
            if len(file_size_list) > 1:
                mid_index = len(file_size_list) // 2
                median_size = sorted(file_size_list)[mid_index]  # take the larger one if tie
                if median_size < RECOMMENDED_FILE_SIZE_BYTES:
                    logger.warning('The median size %d B (< 50 MB) of the parquet files is too small. '
                                   'Total size: %d B. Increase the median file size by calling df.repartition(n) or '
                                   'df.coalesce(n), which might help improve the performance. Parquet files: %s, ...',
                                   median_size, sum(file_size_list), url_list[0])
        finally:
            pool.close()
            pool.join()
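A hedged aside on the try/finally above: Pool objects are also context managers, but __exit__ calls terminate() rather than close()/join(), so the context-manager form is safe here only because map() blocks until every result is ready. A minimal sketch of the equivalent shape, with illustrative placeholder paths:

from multiprocessing.pool import ThreadPool
import os

paths = ['setup.py', 'README.md']  # illustrative placeholder paths
with ThreadPool(8) as p:
    # map() returns only after every getsize call has finished,
    # so the terminate() in __exit__ has nothing left to cut short.
    sizes = p.map(os.path.getsize, paths)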
Example 13: celeba_loader
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def celeba_loader(batch_size, normalize=True, num_child=4, seed=0, workers=8):
    rng = np.random.RandomState(seed)
    images = glob.glob(images_path)
    with Pool(workers) as p:
        while True:
            rng.shuffle(images)
            for s in range(0, len(images), batch_size):
                e = s + batch_size
                batch_names = images[s:e]
                batch_images = p.map(_load_image, batch_names)
                batch_images = np.stack(batch_images)
                if normalize:
                    batch_images = batch_images / 127.5 - 1.
                    # To be sure
                    batch_images = np.clip(batch_images, -1., 1.)
                # Yield the same batch num_child times since the images will be consumed
                # by num_child different child generators
                for i in range(num_child):
                    yield batch_images
Example 14: init_pool
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def init_pool(self, worker_count):
    return ThreadPool(worker_count)
Example 15: Pool
# Required import: from multiprocessing import pool [as alias]
# Or: from multiprocessing.pool import ThreadPool [as alias]
def Pool(processes=None, initializer=None, initargs=()):
    from multiprocessing.pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)
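This last factory mirrors what the standard library already ships: multiprocessing.dummy.Pool has the same signature and likewise returns a ThreadPool. A hedged usage sketch:

from multiprocessing.dummy import Pool  # thread-backed drop-in for multiprocessing.Pool

with Pool(4) as p:
    print(p.map(str.upper, ['a', 'b', 'c']))  # ['A', 'B', 'C']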