This page collects typical usage examples of the Python method multiprocessing.pool.ThreadPool.apply_async. If you have been wondering what exactly ThreadPool.apply_async does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of its containing class, multiprocessing.pool.ThreadPool.
Fifteen code examples of ThreadPool.apply_async are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
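Before the collected examples, a minimal sketch of the basic pattern may help (the worker square and the callback on_result are made-up names, not taken from the examples below): apply_async schedules a call on a worker thread and immediately returns an AsyncResult; you either call get() on that object to retrieve the return value, or pass a callback that receives it when the task finishes.

from multiprocessing.pool import ThreadPool

def square(x):
    # Illustrative worker function
    return x * x

def on_result(value):
    # Callback, invoked in a pool-internal thread when the task finishes
    print("callback got", value)

pool = ThreadPool(processes=4)
async_result = pool.apply_async(square, args=(3,))        # returns an AsyncResult immediately
pool.apply_async(square, args=(4,), callback=on_result)   # result is delivered to the callback
print(async_result.get(timeout=10))                       # blocks until done, prints 9
pool.close()
pool.join()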
Example 1: process_processed
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def process_processed(path, devices, dates, dry):
    pool = multiprocessing.Pool(8)
    thread_pool = ThreadPool(8)
    manager = multiprocessing.Manager()
    queue = manager.Queue()
    total_size = [0]
    file_size_dict = {}
    failed = {}

    def update_file_size_dict(device, remote_path, local_path, size):
        file_size_dict[remote_path] = {
            'size': size,
            'remote': remote_path,
            'local': local_path
        }
        total_size[0] += size
        logger.debug("total_size = %d" % (total_size[0]))

    ym = set([(x.year, x.month) for x in dates])
    for d in devices:
        for year, month in ym:
            fpath = 'time/%04d/%02d' % (year, month)
            srcpath = os.path.join(BACKEND_PROCESSED_BASE_PATH, d, fpath)
            remote_path = '%s:%s' % (BACKEND, srcpath)
            outpath = os.path.join(path, d, fpath)
            # Make the outpath (if needed)
            if not os.path.exists(outpath):
                os.makedirs(outpath)
            thread_pool.apply_async(get_remote_files, args=(d, remote_path, outpath, dates, update_file_size_dict))
            # get_remote_files(d, remote_path, outpath, dates, update_file_size_dict)
    thread_pool.close()
    thread_pool.join()

    total_size = total_size[0]
    finished = 0
    logger.info("# files: %d" % (len(file_size_dict.keys())))
    for k, v in file_size_dict.iteritems():
        pool.apply_async(rsync_worker, args=(k, file_size_dict[k]['local'], '-avzupr', dry, queue))
        # rsync_worker(k, file_size_dict[k]['local'], '-avzpr', dry, queue)
    pool.close()
    try:
        i = 0
        while i < len(file_size_dict.keys()):
            path, ret, out, stderr = queue.get()
            size = file_size_dict[path]['size']
            finished += size
            i += 1
            # pycommons.print_progress(finished, total_size)
            # logger.info("Finished: %d/%d" % (i, len(file_size_dict.keys())))
        pool.join()
    except KeyboardInterrupt:
        logger.warning("Terminating ...")
        return
Example 2: play
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def play(track_id):
    from rhapsody.models.common import Image
    from multiprocessing.pool import ThreadPool

    album_id = plugin.request.args.get('album_id', [False])[0]
    duration = plugin.request.args.get('duration', [False])[0]
    thumbnail_missing = plugin.request.args.get('thumbnail_missing', [False])[0]

    item = dict()
    pool = ThreadPool(processes=2)
    stream_result = pool.apply_async(lambda: rhapsody.streams.detail(track_id))
    if thumbnail_missing:
        album_result = pool.apply_async(lambda: rhapsody.albums.detail(album_id))
        album = album_result.get()
        item['thumbnail'] = album.images[0].get_url(size=Image.SIZE_ORIGINAL)
    stream = stream_result.get()
    item['path'] = stream.url
    plugin.set_resolved_url(item)

    started = rhapsody.events.log_playstart(track_id, stream)
    rhapsody.events.log_playstop(track_id, stream, started, duration)

    pool.close()
    pool.join()
Example 3: get_data
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def get_data(self):
    amazon_service = AmazonService(self.title, self.country)
    bluray_service = BlurayService(self.title)
    imdb_service = ImdbService(self.title)
    tmdb_service = TmdbService(imdb_service.get_id())
    omdb_service = OmdbService(imdb_service.get_id())

    pool = ThreadPool(processes=self.__THREAD_COUNT)
    async_rt_rating = pool.apply_async(omdb_service.get_rt_rating)
    async_bluray_rating = pool.apply_async(bluray_service.get_bluray_rating)
    async_tech_specs = pool.apply_async(imdb_service.get_tech_spec)
    async_artwork = pool.apply_async(tmdb_service.get_artwork)
    async_price = pool.apply_async(amazon_service.get_price)
    pool.close()

    # try:
    rt_rating = async_rt_rating.get()
    bluray_rating = async_bluray_rating.get()
    tech_specs = async_tech_specs.get()
    price = async_price.get()
    artwork = async_artwork.get()
    pool.join()
    # except:
    #     raise ValueError("Oops, something went wrong")

    data = {'rt_rating': rt_rating,
            'bluray_rating': bluray_rating,
            'tech_specs': tech_specs,
            'price': price,
            'artwork': artwork}
    return data
Example 4: NewClient
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
class NewClient(Client):
    """ new client """

    def __init__(self, context, poller):
        super(NewClient, self).__init__(context, poller)
        self._threadn = cv2.getNumberOfCPUs()
        self._pool = ThreadPool(processes=self._threadn)
        self._pending = deque()

    def run_img(self, wait=0.5):
        """ run img proc """
        img_it = self.fetch_img()
        for img in img_it:
            while len(self._pending) > 0 and self._pending[0].ready():
                name, res = self._pending.popleft().get()
                cv2.imshow(name, res)
                ch = cv2.waitKey(1)
            gevent.sleep(wait)
            if len(self._pending) < self._threadn:
                tasks = [self._pool.apply_async(detect_edge, ('detect_edge', img.copy())),
                         self._pool.apply_async(detect_line, ('detect_lineP', img.copy())),
                         self._pool.apply_async(detect_circle, ('detect_circle', img.copy())),
                         self._pool.apply_async(detect_face, ('detect_face', img.copy()))]
                [self._pending.append(task) for task in tasks]
Example 5: demo
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def demo(args):
    """ Demonstrates the Python logging facility. """
    cli = argparse.ArgumentParser()
    cli.add_argument("--verbose", "-v", action='count', default=ENV_VERBOSITY)
    cli.add_argument("--quiet", "-q", action='count', default=0)
    args = cli.parse_args(args)

    level = verbosity_to_level(args.verbose - args.quiet)
    info("new log level: " + str(level))
    old_level = set_loglevel(level)
    info("old level was: " + str(old_level))

    info("printing some messages with different log levels")
    spam("rofl")
    dbg("wtf?")
    info("foo")
    warn("WARNING!!!!")
    err("that didn't go so well")
    crit("pretty critical, huh?")

    info("restoring old loglevel")
    set_loglevel(old_level)
    info("old loglevel restored")

    info("running some threaded stuff")
    pool = ThreadPool()
    for i in range(8):
        pool.apply_async(info, ("async message #" + str(i),))
    pool.close()
    pool.join()
Example 6: _listArtifacts
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def _listArtifacts(self, urls, gavs):
    """
    Loads maven artifacts from a list of GAVs and tries to locate the artifacts in one of the
    specified repositories.

    :param urls: repository URLs where the given GAVs can be located
    :param gavs: list of GAVs
    :returns: dictionary where the key is a MavenArtifact object and the value is its repo root URL.
    """
    def findArtifact(gav, urls, artifacts):
        artifact = MavenArtifact.createFromGAV(gav)
        for url in urls:
            if maven_repo_util.gavExists(url, artifact):
                # Critical section?
                artifacts[artifact] = ArtifactSpec(url)
                return
        logging.warning('Artifact %s not found in any url!', artifact)

    artifacts = {}
    pool = ThreadPool(maven_repo_util.MAX_THREADS)
    for gav in gavs:
        pool.apply_async(findArtifact, [gav, urls, artifacts])

    # Close the pool and wait for the workers to finish
    pool.close()
    pool.join()
    return artifacts
Example 7: test_turn_lights_on_same_time
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def test_turn_lights_on_same_time(self):
    """ Test turning all lights on at the same time, and then off at the same time """
    print "\n"
    print "****************************************************"
    print "Testing turning on all lights at the same time"
    print "****************************************************"

    # Find bridges to associate with
    lights = {}
    lights.update(dr_hue.get_all_lights(self.url, USERNAME))
    print "There are %s lights, turning them all on now" % len(lights)

    pool = ThreadPool(len(lights) or THREAD_MAX)
    for light in lights.keys():
        args = (self.url, light, USERNAME)
        pool.apply_async(dr_hue.turn_light_on, args=args)
    # Wait for all the threads to complete then make sure everything is Kosher
    pool.close()
    pool.join()

    print "Now trying to turn them all off at the same time in 5 seconds"
    pool = ThreadPool(len(lights) or THREAD_MAX)
    sleep(5)
    for light in lights.keys():
        args = (self.url, light, USERNAME)
        pool.apply_async(dr_hue.turn_light_off, args=args)
    # Wait for all the threads to complete then make sure everything is Kosher
    pool.close()
    pool.join()
Example 8: downloadPDFs
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def downloadPDFs(self):
    ### Download all the files extracted from the metadata
    startTime = time.strftime("%c")

    # Loop through the CSV
    f = open(self.csvpath)
    metadata = csv.reader(f, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
    for row in metadata:
        pmcid = row[8]
        ### Check the input is a PMC ID
        if 'PMC' in pmcid:
            print('Starting thread for: '+pmcid)
            pool = Pool(30)
            pool.apply_async(self.saveFile, (pmcid,))
            pool.close()
            pool.join()
        else:
            print('Something is wrong. '+pmcid+' is not a PMC id')
            sys.exit(0)
    f.close()

    print('Finished downloading all files: start {} end {}.'.format(startTime, time.strftime("%c")))
Example 9: getMessagesBySource
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def getMessagesBySource(self, source, batch_mode=False):
    """
    Returns the messages for the given source, including messages
    from the configured builder (if available) and static checks
    Extra arguments are
    """
    self._setupEnvIfNeeded()
    if self._USE_THREADS:
        records = []
        pool = ThreadPool()
        static_check = pool.apply_async(
            getStaticMessages, args=(source.getSourceContent().split('\n'), ))
        if self._isBuilderCallable():
            builder_check = pool.apply_async(self._getBuilderMessages,
                                             args=[source, batch_mode])
            records += builder_check.get()
        records += static_check.get()
        pool.terminate()
        pool.join()
    else:
        records = getStaticMessages(source.getSourceContent().split('\n'))
        if self._isBuilderCallable():
            records += self._getBuilderMessages(source, batch_mode)
    self._saveCache()
    return records
Example 10: update
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def update(args=None):
    projects = list_projects(False, args.dir)
    print("Update in progress...")
    if args.j:
        pool = Pool(args.j)

        def worker(p):
            if p.is_behind():
                p.update()
                print("{} updated".format(p.name))

        for p in projects:
            pool.apply_async(worker, (p,))
        pool.close()
        pool.join()
    else:
        for p in projects:
            if p.is_behind():
                p.update()
                print("{} updated".format(p.name))
    print("Update done")
Example 11: parse_dir
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def parse_dir(dir):
    ignores = Parser.load_ignores(dir)
    ignores.extend([".svn", ".hg", ".git"])

    def callback(res):
        dependencies.extend(res)

    def is_ignored(res, is_dir=False):
        if is_dir:
            res = res + "/"
        for i in ignores:
            if fnmatch.fnmatch(res, i) or res.startswith(i):
                return True
        return False

    def find_ignored(reslist, is_dir=False):
        return [res for res in reslist if is_ignored(res, is_dir)]

    pool = ThreadPool(processes=Parser.concurrency)
    dependencies = []
    for root, dirs, files in scandir.walk(dir):
        for d in find_ignored(dirs, True):
            logging.debug("%s is blacklisted" % d)
            dirs.remove(d)
        for f in find_ignored(files):
            logging.debug("%s is blacklisted" % f)
            files.remove(f)
        for name in files:
            pool.apply_async(Parser.parse_file, args=(os.path.join(root, name),), callback=callback)
    pool.close()
    pool.join()
    return dependencies
Example 12: TaskManager
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
class TaskManager(object):
    def __init__(self, processes):
        self.pool = ThreadPool(processes=processes)
        self.workers = threading.Semaphore(processes)
        self.counter = 0
        self.sizes = 0
        self.total = 0
        self.progress_line = 0
        self.progress_bar = ''

    def new(self, task, arg):
        self.workers.acquire()
        self.sizes += 1
        self.pool.apply_async(task, args=(arg,), callback=self.done)

    def done(self, args):
        self.workers.release()
        self.sizes -= 1
        self.setCount(1)

    def setTotal(self, total):
        self.total = total
        return self

    def getTotal(self):
        return self.total

    def setCount(self, num):
        self.counter += num
        return self

    def getCount(self):
        return self.counter

    #
    # progress() is outdated and has been abandoned.
    # Use the ProgressBar class to show a progress bar instead. Example:
    #     progressBar = ProgressBar(100, "#")
    #     for i in range(0, 99):
    #         progressBar.progress(i)
    def progress(self):
        if self.getCount() != 0:
            self.percent = int((float(self.getCount()) / (self.getTotal() - 1)) * 100)
            blockcount = int(self.percent / 2)
            if blockcount > self.progress_line:
                self.progress_bar += '#'
                self.progress_line = blockcount
            log = str((self.getTotal())) + '||' + str(self.getCount())
            log += '||' + self.progress_bar + '->||' + str(self.progress_line) + "%\r"
            stdout.write(log)
            stdout.flush()
        return

    def size(self):
        return self.sizes

    def __len__(self):
        return self.sizes
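A hypothetical driver for the TaskManager above (the work function and the counts are made up for illustration; TaskManager and the imports it relies on, i.e. threading, ThreadPool and sys.stdout, are assumed to be in scope): because new() acquires the semaphore before submitting and done() releases it from the callback, submission blocks once processes tasks are in flight, which throttles the producer.

def work(n):
    # Illustrative task
    return n * n

tm = TaskManager(processes=4)
tm.setTotal(100)
for i in range(100):
    tm.new(work, i)    # blocks while 4 tasks are already running
    tm.progress()      # draw the '#' progress bar
tm.pool.close()
tm.pool.join()

One caveat worth noting: if a task raises an exception, the callback never fires and its semaphore slot is never released, so in practice wrapping the task body in try/except (or passing apply_async's error_callback on Python 3) is advisable.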
Example 13: run
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def run(self):
    pool = ThreadPool(self.num_agents)
    for idx in range(self.num_agents):
        pool.apply_async(self.run_experiement, args=(self.experiment, idx))
    pool.close()
    pool.join()
Example 14: thread
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def thread(host, port, threads, num):
    pool = ThreadPool(threads)
    for _ in range(num):
        pool.apply_async(job, (host, port))
        time.sleep(0.001)
    pool.close()
    pool.join()
Example 15: run
# Required import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import apply_async [as alias]
def run(self, suites):
    wrapper = self.config.plugins.prepareTest(suites)
    if wrapper is not None:
        suites = wrapper

    wrapped = self.config.plugins.setOutputStream(self.stream)
    if wrapped is not None:
        self.stream = wrapped

    result = self._makeResult()
    size = self.config.options.thread_pool
    if size < 0:
        size = cpu_count()
    pool = ThreadPool(size)
    with measure_time(result):
        for suite in suites:
            pool.apply_async(suite, args=(result,))
        pool.close()
        pool.join()
    self.config.plugins.finalize(result)
    return result
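A general caveat that applies to many of the examples above: when apply_async is fired without keeping the returned AsyncResult, any exception raised inside the worker is silently discarded. Below is a minimal sketch of the two ways to surface such errors (the flaky task and report callback are made-up names; error_callback is available on Python 3).

from multiprocessing.pool import ThreadPool

def flaky(n):
    # Illustrative task that fails on odd input
    if n % 2:
        raise ValueError("odd input: %d" % n)
    return n

def report(exc):
    # error_callback receives the exception raised by the worker
    print("task failed:", exc)

pool = ThreadPool(4)
results = [pool.apply_async(flaky, (n,)) for n in range(4)]   # keep the AsyncResults
pool.apply_async(flaky, (3,), error_callback=report)          # or route errors to a callback (Python 3)
pool.close()
pool.join()
for r in results:
    try:
        print(r.get())   # get() re-raises the worker's exception here
    except ValueError as exc:
        print("failed:", exc)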