This article collects typical usage examples of the ThreadPool.map_async method from Python's gevent.threadpool module. If you have been wondering what exactly ThreadPool.map_async does and how to use it, the curated code examples below may help. You can also read further about the usage of the containing class, gevent.threadpool.ThreadPool.
The following presents 2 code examples of ThreadPool.map_async, sorted by popularity by default.
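Before the examples, a minimal self-contained sketch may help fix the shape of the API: map_async hands the whole iterable to the pool and immediately returns a result object whose get() blocks until every item has been processed. The double helper below is illustrative, not part of gevent.

# Minimal sketch of ThreadPool.map_async; `double` is an illustrative helper.
import gevent
from gevent.threadpool import ThreadPool

def double(x):
    return x * 2

pool = ThreadPool(3)                         # pool with three worker threads
result = pool.map_async(double, range(5))    # returns immediately, not a list
gevent.sleep(0)                              # other greenlets could run here
print(result.get())                          # blocks until done: [0, 2, 4, 6, 8]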
Example 1: TestPool
# Required import: from gevent.threadpool import ThreadPool [as alias]
# Or: from gevent.threadpool.ThreadPool import map_async [as alias]
class TestPool(TestCase):
    __timeout__ = 5
    size = 1

    def setUp(self):
        greentest.TestCase.setUp(self)
        self.pool = ThreadPool(self.size)

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x': 3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, range(10)), list(map(sqr, range(10))))
        self.assertEqual(pmap(sqr, range(100)), list(map(sqr, range(100))))

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertAlmostEqual(get.elapsed, TIMEOUT1, 1)

    def test_async_callback(self):
        result = []
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,), callback=lambda x: result.append(x))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertAlmostEqual(get.elapsed, TIMEOUT1, 1)
        gevent.sleep(0)  # let the callback run
        assert result == [49], result

    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
        get = TimingWrapper(res.get)
        self.assertRaises(gevent.Timeout, get, timeout=TIMEOUT2)
        self.assertAlmostEqual(get.elapsed, TIMEOUT2, 1)
        self.pool.join()

    def test_imap(self):
        it = self.pool.imap(sqr, range(10))
        self.assertEqual(list(it), list(map(sqr, range(10))))
        it = self.pool.imap(sqr, range(10))
        for i in range(10):
            self.assertEqual(six.advance_iterator(it), i * i)
        self.assertRaises(StopIteration, lambda: six.advance_iterator(it))
        it = self.pool.imap(sqr, range(1000))
        for i in range(1000):
            self.assertEqual(six.advance_iterator(it), i * i)
        self.assertRaises(StopIteration, lambda: six.advance_iterator(it))

    def test_imap_random(self):
        it = self.pool.imap(sqr_random_sleep, range(10))
        self.assertEqual(list(it), list(map(sqr, range(10))))

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, range(1000))
        self.assertEqual(sorted(it), list(map(sqr, range(1000))))
        it = self.pool.imap_unordered(sqr, range(1000))
        self.assertEqual(sorted(it), list(map(sqr, range(1000))))

    def test_imap_unordered_random(self):
        it = self.pool.imap_unordered(sqr_random_sleep, range(10))
        self.assertEqual(sorted(it), list(map(sqr, range(10))))

    def test_terminate(self):
        result = self.pool.map_async(sleep, [0.1] * ((self.size or 10) * 2))
        gevent.sleep(0.1)
        kill = TimingWrapper(self.pool.kill)
        kill()
        assert kill.elapsed < 0.5, kill.elapsed
        result.join()

    def sleep(self, x):
        sleep(float(x) / 10.)
        return str(x)

    def test_imap_unordered_sleep(self):
        # testing that imap_unordered returns items in completion order
        result = list(self.pool.imap_unordered(self.sleep, [10, 1, 2]))
        if self.pool.size == 1:
            expected = ['10', '1', '2']
        else:
            expected = ['1', '2', '10']
        self.assertEqual(result, expected)
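Note how test_terminate relies on map_async returning immediately: the pool can be killed while tasks are still queued, and result.join() then waits for whatever remains. Here is a minimal sketch of the same result object's normal life cycle, assuming nothing beyond the gevent API exercised above (the slow helper is illustrative):

# Sketch of the map_async result object's life cycle; `slow` is illustrative.
import time
from gevent.threadpool import ThreadPool

def slow(x):
    time.sleep(0.05)     # blocking work, runs in a worker thread
    return x * x

pool = ThreadPool(2)
res = pool.map_async(slow, range(6))   # returns immediately
res.join()                             # wait for all tasks; pool.kill() would abort instead
print(res.get())                       # [0, 1, 4, 9, 16, 25]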
Example 2: handle
# Required import: from gevent.threadpool import ThreadPool [as alias]
# Or: from gevent.threadpool.ThreadPool import map_async [as alias]
def handle(self, *args, **options):
    self.verbose = options['verbosity'] > 1
    self.debug = options['debug']
    if self.debug:
        self.verbose = True
    self.full = options['full']
    if options['id']:
        feeds = Blog.objects.filter(pk=options['id'])
    else:
        # Fetch all feeds that are not archived. We do fetch feeds that are
        # not yet approved, to make sure they work.
        feeds = Blog.objects.filter(archived=False)

    # Fan out the fetching itself
    fetchers = [FeedFetcher(f, self.trace) for f in feeds]
    num = len(fetchers)
    pool = ThreadPool(options['parallelism'])
    pr = pool.map_async(self._fetch_one_feed, fetchers)
    while not pr.ready():
        gevent.sleep(1)
        self.trace("Fetching feeds (%s/%s done), please wait..." % (num - pool.task_queue.unfinished_tasks, num))

    total_entries = 0
    # Fetching was async, but results processing will be sync. We don't want
    # to deal with multithreaded database connections and similar complications.
    try:
        with transaction.atomic():
            for feed, results in pr.get():
                if isinstance(results, ParserGotRedirect):
                    # Received a redirect. If this is a redirect for exactly the
                    # same URL, just from http to https, special-case it and
                    # allow it. Any other redirect is not followed, since the
                    # target might, for example, no longer be a properly
                    # filtered feed.
                    if results.url == feed.feedurl:
                        # Redirect to itself! Should never happen, of course.
                        AggregatorLog(feed=feed, success=False,
                                      info="Feed returned redirect loop to itself!").save()
                    elif results.url == feed.feedurl.replace('http://', 'https://'):
                        # OK, update it!
                        AggregatorLog(feed=feed, success=True,
                                      info="Feed returned redirect to https, updating registration").save()
                        send_simple_mail(settings.EMAIL_SENDER,
                                         feed.user.email,
                                         "Your blog at Planet PostgreSQL redirected",
                                         u"The blog aggregator at Planet PostgreSQL has picked up a redirect for your blog.\nOld URL: {0}\nNew URL: {1}\n\nThe database has been updated, and new entries will be fetched from the secure URL in the future.\n".format(feed.feedurl, results.url),
                                         sendername="Planet PostgreSQL",
                                         receivername=u"{0} {1}".format(feed.user.first_name, feed.user.last_name),
                                         )
                        send_simple_mail(settings.EMAIL_SENDER,
                                         settings.NOTIFICATION_RECEIVER,
                                         "Blog redirect detected on Planet PostgreSQL",
                                         u"The blog at {0} by {1}\nis returning a redirect to a https version of itself.\n\nThe database has automatically been updated, and will start fetching using https in the future.\n\n".format(feed.feedurl, feed.user),
                                         sendername="Planet PostgreSQL",
                                         receivername="Planet PostgreSQL Moderators",
                                         )
                        feed.feedurl = results.url
                        feed.save()
                    else:
                        AggregatorLog(feed=feed, success=False,
                                      info="Feed returned redirect (http 301)").save()
                elif isinstance(results, Exception):
                    AggregatorLog(feed=feed,
                                  success=False,
                                  info=results).save()
                else:
                    if feed.approved:
                        had_entries = True
                    else:
                        had_entries = feed.has_entries
                    entries = 0
                    titles = []
                    ids = []
                    for entry in results:
                        self.trace("Found entry at %s" % entry.link)
                        # Entry is a post, but we need to check whether it's
                        # already there. The check is done on guid. Some blogs
                        # use http and https interchangeably in the guid, and
                        # also switch between them depending on how the blog is
                        # fetched, so check both forms explicitly.
                        if 'http://' in entry.guid:
                            alternateguid = entry.guid.replace('http://', 'https://')
                        elif 'https://' in entry.guid:
                            alternateguid = entry.guid.replace('https://', 'http://')
                        else:
                            alternateguid = None
                        # Also check whether this entry has been syndicated on
                        # any *other* blog, so we don't accidentally post
                        # something more than once.
                        if not Post.objects.filter(Q(guid=entry.guid) | Q(guid=alternateguid)).exists():
                            self.trace("Saving entry at %s" % entry.link)
                            entry.save()
                            entry.update_shortlink()
                            AggregatorLog(feed=feed,
                                          success=True,
                                          info="Fetched entry at '%s'" % entry.link).save()
                            entries += 1
                            titles.append(entry.title)
                            ids.append(entry.pk)
                            total_entries += 1
                        else:
# ... (the rest of this code is omitted) ...
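The progress-reporting idiom in handle() is worth isolating: rather than blocking on pr.get(), the command polls pr.ready() in a sleep loop and derives a completion count from the pool's task queue. Below is a stripped-down sketch of the same idiom, with an illustrative fetch_one standing in for the real _fetch_one_feed:

# Stripped-down version of the progress loop in handle() above;
# `fetch_one` is purely illustrative.
import time
import gevent
from gevent.threadpool import ThreadPool

def fetch_one(n):
    time.sleep(0.2)                   # simulate blocking network I/O
    return n

items = list(range(10))
pool = ThreadPool(4)
pr = pool.map_async(fetch_one, items)
while not pr.ready():                 # poll instead of blocking on pr.get()
    gevent.sleep(0.1)
    done = len(items) - pool.task_queue.unfinished_tasks
    print("Fetched %s/%s, please wait..." % (done, len(items)))
print(pr.get())                       # already finished; the full result list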