This article collects typical usage examples of the Python method eventlet.semaphore.Semaphore.locked. If you have been wondering how Semaphore.locked is used in practice, or what it looks like in real code, the curated example below may help. You can also explore further usage examples of the containing class, eventlet.semaphore.Semaphore.
The following shows 1 code example of the Semaphore.locked method.
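Before the example, a quick note on what the method does: Semaphore.locked() returns True when the semaphore's counter has dropped to zero, i.e. when a call to acquire() would block. A minimal sketch (the variable name below is illustrative only, not part of the example that follows):

from eventlet.semaphore import Semaphore

sem = Semaphore(1)     # one slot available
print(sem.locked())    # False: acquire() would succeed immediately
sem.acquire()
print(sem.locked())    # True: the counter is now 0, acquire() would block
sem.release()
print(sem.locked())    # False again once the slot is returned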
Example 1: Pool
# Required import: from eventlet.semaphore import Semaphore
# Or: from eventlet.semaphore.Semaphore import locked
class Pool(object):
    def __init__(self, min_size=0, max_size=4, track_events=False):
        if min_size > max_size:
            raise ValueError('min_size cannot be bigger than max_size')
        self.max_size = max_size
        self.sem = Semaphore(max_size)
        self.procs = proc.RunningProcSet()
        if track_events:
            self.results = coros.queue()
        else:
            self.results = None

    def resize(self, new_max_size):
        """ Change the :attr:`max_size` of the pool.

        If the pool gets resized when there are more than *new_max_size*
        coroutines checked out, when they are returned to the pool they will be
        discarded. The return value of :meth:`free` will be negative in this
        situation.
        """
        max_size_delta = new_max_size - self.max_size
        self.sem.counter += max_size_delta
        self.max_size = new_max_size

    @property
    def current_size(self):
        """ The number of coroutines that are currently executing jobs. """
        return len(self.procs)

    def free(self):
        """ Returns the number of coroutines that are available for doing
        work."""
        return self.sem.counter

    def execute(self, func, *args, **kwargs):
        """Execute func in one of the coroutines maintained
        by the pool, when one is free.

        Immediately returns a :class:`~eventlet.proc.Proc` object which can be
        queried for the func's result.

        >>> pool = Pool()
        >>> task = pool.execute(lambda a: ('foo', a), 1)
        >>> task.wait()
        ('foo', 1)
        """
        # if reentering an empty pool, don't try to wait on a coroutine freeing
        # itself -- instead, just execute in the current coroutine
        if self.sem.locked() and api.getcurrent() in self.procs:
            p = proc.spawn(func, *args, **kwargs)
            try:
                p.wait()
            except:
                pass
        else:
            self.sem.acquire()
            p = self.procs.spawn(func, *args, **kwargs)
            # assuming the above line cannot raise
            p.link(lambda p: self.sem.release())
        if self.results is not None:
            p.link(self.results)
        return p

    execute_async = execute

    def _execute(self, evt, func, args, kw):
        p = self.execute(func, *args, **kw)
        p.link(evt)
        return p

    def waitall(self):
        """ Calling this function blocks until every coroutine
        completes its work (i.e. there are 0 running coroutines)."""
        return self.procs.waitall()

    wait_all = waitall

    def wait(self):
        """Wait for the next execute in the pool to complete,
        and return the result."""
        return self.results.wait()

    def waiting(self):
        """Return the number of coroutines waiting to execute.
        """
        if self.sem.balance < 0:
            return -self.sem.balance
        else:
            return 0

    def killall(self):
        """ Kill every running coroutine as immediately as possible."""
        return self.procs.killall()

    def launch_all(self, function, iterable):
        """For each tuple (sequence) in *iterable*, launch ``function(*tuple)``
        in its own coroutine -- like ``itertools.starmap()``, but in parallel.
        Discard values returned by ``function()``. You should call
        ``wait_all()`` to wait for all coroutines, newly-launched plus any
        previously-submitted :meth:`execute` or :meth:`execute_async` calls, to
#......... part of the code omitted here .........
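To round out the truncated example, here is a short, hypothetical usage sketch of the Pool class shown above. It assumes the legacy eventlet modules the class relies on (proc, coros, api) are importable alongside Semaphore; the fetch function and its arguments are made up for illustration and are not part of the original example:

def fetch(url):
    # illustrative worker function, not from the original example
    return ('fetched', url)

pool = Pool(max_size=2)
tasks = [pool.execute(fetch, u) for u in ('a', 'b', 'c')]
pool.waitall()                     # block until all running coroutines finish
print([t.wait() for t in tasks])   # collect each Proc's result
print(pool.free())                 # counter returns to max_size (2) once idle

Note how execute() acquires the pool's Semaphore before spawning, and releases it via p.link(...) when the coroutine finishes; Semaphore.locked() is what lets execute() detect that the pool is exhausted and fall back to running the job in the current coroutine instead of deadlocking.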