This article collects and summarizes typical usage examples of the Python method multiprocessing.pool.ThreadPool.join. If you have been wondering what ThreadPool.join does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the class it belongs to, multiprocessing.pool.ThreadPool.
The following 15 code examples of ThreadPool.join are shown, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
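Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: close() stops new task submissions, and join() blocks until the worker threads have drained their work and exited.

from multiprocessing.pool import ThreadPool

def square(x):
    return x * x

pool = ThreadPool(4)                    # four worker threads
results = pool.map(square, range(10))   # dispatch work across the pool
pool.close()                            # no more tasks may be submitted
pool.join()                             # block until all workers have exited
print(results)                          # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]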
Example 1: dowload_person
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def dowload_person(person_url):
    print('start to download person %s\n' % person_url)
    # get_person_pic_url_Set and download_pic are helpers defined elsewhere in the project
    person_pic_url = get_person_pic_url_Set(person_url)
    pool = ThreadPool(8)                    # eight download threads
    pool.map(download_pic, person_pic_url)
    pool.close()
    pool.join()
Example 2: getMessagesBySource
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def getMessagesBySource(self, source, batch_mode=False):
    """
    Returns the messages for the given source, including messages
    from the configured builder (if available) and static checks.
    """
    self._setupEnvIfNeeded()
    if self._USE_THREADS:
        records = []
        pool = ThreadPool()
        static_check = pool.apply_async(
            getStaticMessages, args=(source.getSourceContent().split('\n'), ))
        if self._isBuilderCallable():
            builder_check = pool.apply_async(self._getBuilderMessages,
                                             args=[source, batch_mode])
            records += builder_check.get()
        records += static_check.get()
        pool.terminate()
        pool.join()
    else:
        records = getStaticMessages(source.getSourceContent().split('\n'))
        if self._isBuilderCallable():
            records += self._getBuilderMessages(source, batch_mode)
    self._saveCache()
    return records
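Note the shutdown order here, in contrast with Example 1: both results have already been collected with get() before the pool is torn down, so terminate(), which stops the workers immediately instead of draining pending tasks the way close() does, is safe; join() then simply waits for the worker threads to exit.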
Example 3: local_job_runner
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def local_job_runner(cmds_list, num_threads, throw_error=True):
    """
    Execute a list of cmds locally using a thread pool with at most
    num_threads threads, and wait for all jobs to finish before exiting.
    If throw_error is True, raise RuntimeError when any job fails.
    If throw_error is False, return a list of cmds that failed.
    Parameters:
      cmds_list - cmds that will be executed in the ThreadPool
      num_threads - number of threads that will be used in the ThreadPool
      throw_error - whether or not to raise RuntimeError when any cmd fails
    """
    run_cmd_in_shell = lambda x: backticks(x, merge_stderr=True)
    pool = ThreadPool(processes=num_threads)
    try:
        # backticks (a shell-out helper) returns a tuple per command whose
        # first element is the output and whose second is the exit code
        rets = pool.map(run_cmd_in_shell, cmds_list)
    finally:
        pool.close()
        pool.join()
    failed_cmds = [cmd for cmd, ret in zip(cmds_list, rets) if ret[1] != 0]
    failed_cmds_out = [ret[0] for ret in rets if ret[1] != 0]
    if throw_error and len(failed_cmds) > 0:
        errmsg = "\n".join(["CMD failed: %s, %s" % (cmd, out)
                            for (cmd, out) in zip(failed_cmds, failed_cmds_out)])
        raise RuntimeError(errmsg)
    else:
        return failed_cmds
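A hypothetical call, assuming the backticks helper is importable (the commands below are illustrative only): run three shell commands on two threads and collect the failures instead of raising.

failed = local_job_runner(["echo a", "echo b", "false"],
                          num_threads=2, throw_error=False)
print(failed)   # expected: ["false"], the only command with a non-zero exit code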
Example 4: read
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def read(self, sftppath, localPath=None, numParallelConnections=1):
    if localPath is None:
        localPath = os.getcwd()  # local path - can be changed later
    sftp = paramiko.SFTPClient.from_transport(self.transport)
    if numParallelConnections > 1:
        pool = ThreadPool(numParallelConnections)

    def getFile(sftppath, localpath):
        # each worker opens its own SFTP connection
        pconnection = SFTPConnection(self.connectionInfo)
        pconnection.connect()
        psftp = paramiko.SFTPClient.from_transport(pconnection.transport)
        psftp.get(sftppath, localpath)
        psftp.close()
        pconnection.close()

    def recursiveRead(sftp, sftppath, localPath):
        fileattr = sftp.lstat(sftppath)
        if not stat.S_ISDIR(fileattr.st_mode):  # it is a file
            if numParallelConnections > 1:
                pool.apply_async(getFile, args=(sftppath, os.path.join(localPath, os.path.basename(sftppath))))
            else:
                sftp.get(sftppath, os.path.join(localPath, os.path.basename(sftppath)))
        else:  # it is a directory
            try:  # create the local directory; try/except handles race conditions
                os.makedirs(os.path.join(localPath, os.path.basename(sftppath)))
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise
            for file in sftp.listdir_attr(sftppath):
                recursiveRead(sftp, os.path.join(sftppath, file.filename),
                              os.path.join(localPath, os.path.basename(sftppath)))

    recursiveRead(sftp, sftppath, localPath)
    sftp.close()
    if numParallelConnections > 1:
        pool.close()
        pool.join()
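A hypothetical invocation, assuming read is a method of the same SFTPConnection class that getFile instantiates (the connection_info value is illustrative, not from the original):

conn = SFTPConnection(connection_info)   # connection_info: hypothetical host/credentials object
conn.connect()
conn.read('/remote/dataset', localPath='/tmp/dataset', numParallelConnections=4)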
Example 5: get_for_genres
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def get_for_genres(genres):
    genres = set(genres)
    playlists = {}
    new_genres = set()
    for page in range(5):
        args = []
        for g in genres:
            args.append((g, page))
        try:
            pool = ThreadPool(PROCESSES)
            pfunc = parse_page
            for i, res in enumerate(pool.imap_unordered(pfunc, args)):
                genre, page, pl, found = res
                print("%d/%d" % (i + 1, len(args)))
                playlists.update(pl)
                new_genres |= found
                if not pl:
                    genres.remove(genre)
        except Exception as e:
            print(e)
            return playlists, []
        finally:
            pool.terminate()
            pool.join()
    return playlists, new_genres
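Two details are worth noting in this example: imap_unordered yields results in completion order rather than submission order, which is why each worker hands back its genre and page alongside the parsed playlists; and the finally block guarantees the pool is torn down with terminate() and join() on every page iteration, whether it succeeded or not.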
Example 6: check_artifact_cache
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def check_artifact_cache(self, vts):
    """Checks the artifact cache for the specified VersionedTargetSets.

    Returns a list of the ones that were satisfied from the cache. These don't require building.
    """
    if not vts:
        return [], []
    cached_vts = []
    uncached_vts = OrderedSet(vts)
    if self._artifact_cache and self.context.options.read_from_artifact_cache:
        pool = ThreadPool(processes=6)
        # chunksize=1 dispatches each cache lookup individually, so one slow
        # lookup does not hold up a whole chunk of others
        res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
                       vts, chunksize=1)
        pool.close()
        pool.join()
        for vt, was_in_cache in zip(vts, res):
            if was_in_cache:
                cached_vts.append(vt)
                uncached_vts.discard(vt)
                self.context.log.info('Using cached artifacts for %s' % vt.targets)
                vt.update()
            else:
                self.context.log.info('No cached artifacts for %s' % vt.targets)
    return cached_vts, list(uncached_vts)
Example 7: bench_compression_comparison
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def bench_compression_comparison(n_chunks, df_length, append_mul, pool_size, pool_step, repeats,
                                 use_raw_lz4, use_HC):
    _str = construct_test_data(df_length, append_mul)
    chunk_size = len(_str) / 1024 ** 2.0   # chunk size in MiB
    _strarr = [_str] * n_chunks

    # Single-threaded
    # ---------------
    measurements = bench_single(repeats, _strarr, use_HC)
    print_results(1, chunk_size, n_chunks, chunk_size * n_chunks, measurements)
    single_mean = np.mean(measurements)

    # Multi-threaded
    # --------------
    for sz in range(2, pool_size + 1, pool_step):
        if use_raw_lz4:
            pool = ThreadPool(sz)
        else:
            pool = None
            c.set_compression_pool_size(sz)
        measurements = bench_multi(repeats, _strarr, use_HC, pool=pool)
        print_results(sz, chunk_size, n_chunks, chunk_size * n_chunks, measurements, compare=single_mean)
        if pool:
            pool.close()
            pool.join()
    print("")
Example 8: _send_some_brokers
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def _send_some_brokers(self, requests, ignore_errors=True):
    """
    Sends a request to one or more brokers. The responses are returned mapped to the broker that
    they were retrieved from. This method uses a thread pool to parallelize sends.

    Args:
        requests (int -> BaseRequest): A dictionary, where keys are integer broker IDs and the values are valid
            request objects that inherit from BaseRequest.

    Returns:
        dict (int -> BaseResponse): A map of broker IDs to response instances (inherited from
            BaseResponse). Failed requests are represented with a value of None
    """
    results = {}
    pool = ThreadPool(processes=self.configuration.broker_threads)
    for broker_id in requests:
        results[broker_id] = pool.apply_async(self._send_to_broker, (broker_id, requests[broker_id]))
    pool.close()
    pool.join()

    responses = {}
    for broker_id in results:
        try:
            responses[broker_id] = results[broker_id].get()
        except ConnectionError:
            if ignore_errors:
                # Individual broker failures are OK, as we'll represent them with a None value
                responses[broker_id] = None
            else:
                raise
    return responses
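The same fan-out pattern in a minimal, self-contained form (the fetch function below is a stand-in for the network call, not part of the original example): submit one task per key with apply_async, shut the pool down with close() and join(), then harvest the results with get().

from multiprocessing.pool import ThreadPool

def fetch(broker_id):
    return broker_id * 10   # stand-in for a request to a broker

pool = ThreadPool(processes=4)
futures = {b: pool.apply_async(fetch, (b,)) for b in (1, 2, 3)}
pool.close()
pool.join()
print({b: f.get() for b, f in futures.items()})   # {1: 10, 2: 20, 3: 30}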
Example 9: _listArtifacts
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def _listArtifacts(self, urls, gavs):
    """
    Loads maven artifacts from a list of GAVs and tries to locate the artifacts in one of the
    specified repositories.

    :param urls: repository URLs where the given GAVs can be located
    :param gavs: list of GAVs
    :returns: Dictionary where the key is a MavenArtifact object and the value is its repo root URL.
    """
    def findArtifact(gav, urls, artifacts):
        artifact = MavenArtifact.createFromGAV(gav)
        for url in urls:
            if maven_repo_util.gavExists(url, artifact):
                # Critical section?
                artifacts[artifact] = ArtifactSpec(url)
                return
        logging.warning('Artifact %s not found in any url!', artifact)

    artifacts = {}
    pool = ThreadPool(maven_repo_util.MAX_THREADS)
    for gav in gavs:
        pool.apply_async(findArtifact, [gav, urls, artifacts])

    # Close the pool and wait for the workers to finish
    pool.close()
    pool.join()
    return artifacts
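On the "# Critical section?" question: a single dict key assignment is atomic in CPython thanks to the GIL, so the code works as written. If you want the protection to be explicit anyway, a hedged variant guards the shared dict with a lock:

import threading

artifacts_lock = threading.Lock()

def findArtifact(gav, urls, artifacts):
    artifact = MavenArtifact.createFromGAV(gav)
    for url in urls:
        if maven_repo_util.gavExists(url, artifact):
            with artifacts_lock:   # make the shared-dict update explicit
                artifacts[artifact] = ArtifactSpec(url)
            return
    logging.warning('Artifact %s not found in any url!', artifact)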
Example 10: main
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def main():
    # Run the Tales
    pool = ThreadPool(processes=int(tcfg['Workers'].get('pool_size', 10)))
    pool.map(worker, tales)
    pool.close()
    pool.join()
Example 11: run
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def run(self):
    pool = ThreadPool(self.num_agents)
    for idx in range(self.num_agents):
        pool.apply_async(self.run_experiement, args=(self.experiment, idx))
    pool.close()
    pool.join()
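One caveat with this fire-and-forget use of apply_async: an exception raised inside run_experiement disappears silently because the AsyncResult handles are discarded. A hedged sketch of the same method body that keeps the handles and calls get() after join() so worker errors surface:

pool = ThreadPool(self.num_agents)
results = [pool.apply_async(self.run_experiement, args=(self.experiment, idx))
           for idx in range(self.num_agents)]
pool.close()
pool.join()
for r in results:
    r.get()   # re-raises any exception from a worker thread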
Example 12: worker
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def worker(self, db, lista):
    '''
    Multithreaded method for the online plugin search
    '''
    # Make the pool of workers
    processes = 5
    # WARNING: on fiber you can go up to 20 workers without errors; on ADSL at most 4!
    pool = Pool(processes)
    # Open the urls in their own threads and return the results
    pluglist = pool.map(onlinePluginSearch, lista)
    # Close the pool and wait for the work to finish
    pool.close()
    pool.join()
    # Parse the result (a list of tuples), collect everything into a string (result) and update the cache
    result = ''
    for item in pluglist:
        if item[1] != []:
            for plug in item[1]:
                db.updateCache(item[0], plug)
                result = result + str(plug) + ','
    numbers = result.count(',') + 1
    print("Number of available plugins: %s" % numbers)
    print("Adding to policy plugins: 19506,10287,12634 for credential checks and ping target.")
    # Always add these 3 plug-ins to verify whether the target is alive
    result = result + "19506,10287,12634"
    return result
Example 13: main
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def main():
    good_proxys = []
    socket.setdefaulttimeout(10)
    with open('proxylist.txt') as f:
        proxy_list = f.readlines()
    total = len(proxy_list)
    # two threads per core, plus one
    pool = ThreadPool(multiprocessing.cpu_count() * 2 + 1)
    async_results = []
    for index, proxy in enumerate(proxy_list):
        if proxy.startswith('http://'):
            curr_proxy = proxy[7:].strip()
        else:
            curr_proxy = proxy.strip()
        async_results.append(pool.apply_async(
            check_proxy,
            args=(curr_proxy, index, total)
        ))
    pool.close()
    pool.join()
    for result in async_results:
        proxy = result.get()
        if proxy:
            good_proxys.append(proxy)
    if not good_proxys:
        print('No proxies are working!')
        return
    with open('proxy.txt', 'w') as f:
        for proxy in good_proxys:
            f.write(proxy + '\n')
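check_proxy is not shown in this example; a hypothetical implementation that routes one HTTP request through the proxy and returns the proxy string on success, or None on failure (the probe URL and message format are illustrative):

import urllib.request

def check_proxy(proxy, index, total):
    try:
        opener = urllib.request.build_opener(
            urllib.request.ProxyHandler({'http': 'http://' + proxy}))
        opener.open('http://example.com', timeout=10).read()
        print('%d/%d working: %s' % (index + 1, total, proxy))
        return proxy
    except OSError:   # urllib.error.URLError is a subclass of OSError
        return None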
Example 14: run
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def run(self, max_number_of_live_tokens=None, group=None):
    group = Pool()
    try:
        stages = []
        in_q = _DummyQueue()
        end_in = Event()
        if self._filters[0].is_serial:
            serial = Lock()
        else:
            serial = _DummyLock()
        if self._filters[0].is_ordered:
            out_q = PriorityQueue()
        else:
            out_q = Queue()
        for i, f in enumerate(self._filters):
            # per-filter stage construction is elided in the original example
            pass
        send_q, recv_q = Queue(), Queue()
        group.close()
    except:
        group.terminate()
    finally:
        group.join()
Example 15: downloadPDFs
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import join [as alias]
def downloadPDFs(self):
    ### Download all the files extracted from the metadata
    startTime = time.strftime("%c")
    # Loop through the CSV
    f = open(self.csvpath)
    metadata = csv.reader(f, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
    for row in metadata:
        pmcid = row[8]
        ### Check the input is a PMC ID
        if 'PMC' in pmcid:
            print('Starting thread for: ' + pmcid)
            pool = Pool(30)
            pool.apply_async(self.saveFile, (pmcid,))
            pool.close()
            pool.join()
        else:
            print('Something is wrong. ' + pmcid + ' is not a PMC id')
            sys.exit(1)
    f.close()
    print('Finished downloading all files: start {} end {}.'.format(startTime, time.strftime("%c")))
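A design note on this last example (hedged): the loop builds, uses, and joins a fresh 30-worker pool for every CSV row, so only one download is ever in flight at a time. A sketch of the same loop restructured around a single pool, which lets the downloads actually overlap:

pool = Pool(30)
for row in metadata:
    pmcid = row[8]
    if 'PMC' in pmcid:
        pool.apply_async(self.saveFile, (pmcid,))
    else:
        print('Something is wrong. ' + pmcid + ' is not a PMC id')
        sys.exit(1)
pool.close()   # submit everything first, then wait once
pool.join()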