This article collects typical usage examples of Python's Queue.LifoQueue class. If you are wondering what the LifoQueue class is for, or how to use it in practice, the curated examples below may help.
The following presents 15 code examples of the LifoQueue class, sorted by popularity by default.
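Before the examples, a quick refresher on the class itself: Queue.LifoQueue (queue.LifoQueue in Python 3) is a thread-safe stack with the same blocking put/get API as Queue.Queue. A minimal standalone sketch of its behaviour:

from Queue import LifoQueue, Empty  # Python 3: from queue import LifoQueue, Empty

stack = LifoQueue(maxsize=2)   # maxsize=0 would mean "unbounded"
stack.put('first')
stack.put('second')

print stack.get()              # 'second' -- last in, first out
print stack.get()              # 'first'

try:
    stack.get_nowait()         # queue is empty now, so this raises Empty
except Empty:
    print 'queue is empty'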
Example 1: _put_conn
def _put_conn(self, host, port, sock):
    u"""Return a connection to the connection pool.

    The connection state is checked; dropped connections are discarded.
    """
    if hasattr(self.sock_mod, "get_display_name"):
        sock_name = self.sock_mod.get_display_name()
    else:
        sock_name = None
    sock_info = 'sock_mod:%s host:%s port:%s' % (sock_name, host, port)
    if sock:
        if is_connection_dropped(sock):
            logging.debug(u'Connection already closed; not returning it to the pool. %s' % sock_info)
            try:
                sock.close()
            except:
                pass
        else:
            with self.lock:
                site_connes = self.site_dict.get(u'%s:%s' % (host, port), None)
                if site_connes is None:
                    site_connes = LifoQueue(self.max_site_conn)
                try:
                    site_connes.put(sock)
                    logging.debug(u'Connection returned to the pool. %s' % sock_info)
                except Full:
                    logging.debug(u'Connection pool is full. %s' % sock_info)
                    try:
                        sock.close()
                    except:
                        pass
                    return
                self.site_dict[u'%s:%s' % (host, port)] = site_connes
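A LifoQueue fits a connection pool well: the most recently returned socket is handed out first, so idle connections stay warm. The checkout path is not part of the example above; a hypothetical sketch of what it might look like (the names _get_conn, site_dict and the Empty import are assumptions, not code from the original project):

def _get_conn(self, host, port):
    u"""Hypothetical counterpart: fetch a pooled connection, or None if none is available."""
    with self.lock:
        site_connes = self.site_dict.get(u'%s:%s' % (host, port), None)
    if site_connes is None:
        return None
    try:
        # get_nowait() pops the most recently stored socket (LIFO order).
        return site_connes.get_nowait()
    except Empty:
        return None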
Example 2: __init__
def __init__(self, *args):
    super(B2BucketThreaded, self).__init__(*args)

    num_threads = 50
    self.queue = LifoQueue(num_threads * 2)
    self.file_locks = defaultdict(Lock)
    self.running = True

    self.threads = []
    print "Thread ",
    for i in xrange(num_threads):
        t = threading.Thread(target=self._file_updater)
        t.start()
        self.threads.append(t)
        print ".",
    print

    self.pre_queue_lock = Lock()
    self.pre_queue_running = True
    self.pre_queue = LifoQueue(num_threads * 2)
    self.pre_file_dict = {}
    self.pre_thread = threading.Thread(target=self._prepare_update)
    self.pre_thread.start()
Example 3: ConnectionPool
class ConnectionPool(object):
    def __init__(
            self, connection_class=Connection,
            max_connections=1024, timeout=5, **connection_kwargs):
        self.connection_class = connection_class
        self.max_connections = max_connections
        self.connection_kwargs = connection_kwargs
        self.timeout = timeout
        self.connections = []
        self.reset()

    def ensure_safe(self):
        if self.pid != os.getpid():
            with self.ensure_lock:  # lock for concurrent threads
                if self.pid == os.getpid():  # double check
                    return
                self.disconnect()
                self.reset()

    def get_connection(self):
        self.ensure_safe()
        try:
            connection = self.queue.get(block=True, timeout=self.timeout)
        except Empty:
            raise ConnectionError('connection pool is full')
        if not connection:
            connection = self.make_connection()
        return connection

    def make_connection(self):
        connection = self.connection_class(**self.connection_kwargs)
        self.connections.append(connection)
        return connection

    def release(self, connection):
        self.ensure_safe()
        if connection.pid != self.pid:
            return
        try:
            self.queue.put_nowait(connection)
        except Full:
            pass

    def reset(self):
        self.pid = os.getpid()
        self.ensure_lock = threading.Lock()
        self.disconnect()
        # A LifoQueue lets released connections be reused first.
        self.queue = LifoQueue(self.max_connections)
        self.connections = []
        while True:
            try:
                self.queue.put_nowait(None)
            except Full:
                break

    def disconnect(self):
        for connection in self.connections:
            connection.disconnect()
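The pool above is pre-filled with None placeholders, so get_connection() returns immediately until max_connections real connections exist, and released connections sit at the top of the LifoQueue ready for the next caller. A typical checkout/release cycle might look roughly like this (the constructor arguments and the send_command method are illustrative assumptions about the Connection class, not part of the example):

pool = ConnectionPool(max_connections=8, timeout=5, host='127.0.0.1', port=6379)
conn = pool.get_connection()   # a freshly made connection or a recently released one
try:
    conn.send_command('PING')  # hypothetical method on the Connection class
finally:
    pool.release(conn)         # hand it back so other threads can reuse it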
Example 4: DummyMessageHandler
class DummyMessageHandler(MessageHandler):
    # TODO(steffen): locking
    def __init__(self):
        MessageHandler.__init__(self)
        self._messages = LifoQueue()
        self._devices = []

    def register(self, device):
        self._devices.append(device)

    def read_message(self):
        return self._messages.get()

    def write_message_from_device(self, message):
        self._messages.put(message)

    def write_message(self, message):
        for d in self._devices:
            d.handle_message(message)

    def has_messages(self):
        for d in self._devices:
            d.loop()
        return not self._messages.empty()

    def stop(self):
        pass
Example 5: PooledIncomingQueue
class PooledIncomingQueue(IncomingQueue):
    def init_queues(self, n=5, buffsize=0, maxsize=1000*1000*1000):
        maxsize = maxsize / n
        self.write_executor = ThreadPoolExecutor(poolsize=1, queuesize=100)
        self.rqfile = FileDequeue(self.qdir, reader=FPSortingQueueFileReader)
        #self.rqfile = DummyFileDequeue(self.qdir)
        self.qfiles = [FileEnqueue(self.qdir, suffix=str(i),
                                   maxsize=maxsize,
                                   buffer=buffsize,
                                   executor=self.write_executor)
                       for i in range(n)]
        self.avail = LifoQueue()
        for q in self.qfiles:
            self.avail.put(q)

    def shutdown(self):
        super(PooledIncomingQueue, self).shutdown()
        self.write_executor.shutdown()

    def add(self, curis):
        processed = 0
        t0 = time.time()
        enq = self.avail.get()
        t = time.time() - t0
        if t > 0.1:
            logging.warn('self.avail.get() %.4f', t)
        try:
            enq.queue(curis)
            self.addedcount += len(curis)
            processed += len(curis)
            return dict(processed=processed)
        finally:
            t0 = time.time()
            self.avail.put(enq)
            t = time.time() - t0
            if t > 0.1:
                logging.warn('slow self.avail.put() %.4f', t)
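Examples 5 and 15 share the same pattern: FileEnqueue writers are checked out of a LifoQueue, used, and returned in a finally block, so the most recently used writer is handed out next. Stripped of the crawler-specific classes, the pattern looks roughly like this minimal sketch (ResourcePool and its file resources are illustrative names, not part of the original code):

from Queue import LifoQueue  # Python 3: from queue import LifoQueue

class ResourcePool(object):
    """Minimal borrow/return pool built on a LifoQueue (illustrative only)."""
    def __init__(self, resources):
        self.avail = LifoQueue()
        for r in resources:
            self.avail.put(r)

    def use(self, work):
        res = self.avail.get()   # blocks until a resource is free
        try:
            return work(res)     # run the caller's function with the resource
        finally:
            self.avail.put(res)  # always hand the resource back

pool = ResourcePool([open('/tmp/out-%d.txt' % i, 'w') for i in range(3)])
pool.use(lambda f: f.write('hello\n'))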
Example 6: __init__
def __init__(self, url):
    # Call threading.Thread.__init__ directly instead of super() for Python 2 compatibility
    threading.Thread.__init__(self)
    self.lv_url = url
    self._lilo_head_pool = LifoQueue()
    self._lilo_jpeg_pool = LifoQueue()

    self.header = None
    self.frameinfo = []
Example 7: lifo_queue_usage
def lifo_queue_usage():
    from Queue import LifoQueue
    lifo_queue = LifoQueue()
    lifo_queue.put(1)
    lifo_queue.put(2)
    print lifo_queue.get()  # 2 -- last in, first out
    print lifo_queue.get()  # 1
Example 8: stack
class stack():
    def __init__(self):
        self.s = LifoQueue()

    def push(self, x):
        self.s.put(x)

    def pop(self):
        return self.s.get()

    def empty(self):
        return self.s.empty()
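A quick check of the wrapper above, showing that the underlying LifoQueue gives it the expected last-in, first-out behaviour (the variable names here are ours, not from the original example):

s = stack()
s.push('a')
s.push('b')
print s.pop()    # 'b' -- last pushed, first popped
print s.pop()    # 'a'
print s.empty()  # True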
Example 9: dfSearch
def dfSearch(start, actions, goalTest, depthLimit=False):
    """Depth-first search using a LifoQueue as the frontier stack."""
    queue = LifoQueue()
    queue.put(start)
    while True:
        if queue.empty():
            return None  # frontier exhausted: no goal node found
        node = queue.get()
        if goalTest(node):
            return node
        if (depthLimit is False) or (node.depth <= depthLimit):
            queue = node.expand(queue, actions)
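dfSearch only assumes that nodes expose a depth attribute and an expand(queue, actions) method that pushes successors onto the LIFO frontier; the example does not include such a class. Here is a small hypothetical Node and driving call, written purely to illustrate how the function might be used (none of these names come from the original example):

class Node(object):
    """Hypothetical search node for driving dfSearch (not part of the original example)."""
    def __init__(self, state, depth=0):
        self.state = state
        self.depth = depth

    def expand(self, queue, actions):
        # Push one child per action onto the LIFO frontier and return it.
        for action in actions:
            queue.put(Node(action(self.state), self.depth + 1))
        return queue

# Find a node whose state reaches 3, expanding by +1 and +2 steps, depth-limited to 4.
result = dfSearch(Node(0),
                  actions=[lambda s: s + 1, lambda s: s + 2],
                  goalTest=lambda n: n.state == 3,
                  depthLimit=4)
print result.state  # 3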
Example 10: __init__
def __init__(self, parent, notify=None):
    QObject.__init__(self, parent)
    self.count = 0
    self.last_saved = -1
    self.requests = LifoQueue()
    self.notify_requests = LifoQueue()
    self.notify_data = notify
    t = Thread(name='save-thread', target=self.run)
    t.daemon = True
    t.start()
    t = Thread(name='notify-thread', target=self.notify_calibre)
    t.daemon = True
    t.start()
    self.status_widget = w = SaveWidget(parent)
    self.start_save.connect(w.start, type=Qt.QueuedConnection)
    self.save_done.connect(w.stop, type=Qt.QueuedConnection)
Example 11: __init__
def __init__(self,
             stream_func,
             catchup=True,
             last_known_block_ref=None,
             block_cache=None,
             event_map_fn=None,
             max_retry=20):
    if block_cache is None:
        block_cache = get_block_cache()
    self.max_retry = max_retry
    self.cache = block_cache
    self.stream_func = stream_func
    self.block_ref_queue = Queue()
    self.incoming_event_queue = Queue()
    self.block_replay_stack = LifoQueue()

    self.catchup_begin = threading.Event()
    self.catchup_complete = threading.Event()
    self.cancel_flag = threading.Event()
    self.should_catchup = catchup
    self.last_known_block_ref = ref_base58(last_known_block_ref)

    if event_map_fn is None:
        self.event_map_fn = lambda x: x
    else:
        self.event_map_fn = event_map_fn

    self.catchup_thread = threading.Thread(
        name='blockchain-catchup',
        target=self._perform_catchup)
    self.incoming_event_thread = threading.Thread(
        name='journal-stream-listener',
        target=self._receive_incoming_events)

    self._event_iterator = self._event_stream()
Example 12: __init__
def __init__(self, max_connections=10, block_timeout=5,
             **kwds):
    self.max_connections = max_connections
    self.block_timeout = block_timeout

    # kwds is passed straight to the DB-API driver, so pop our own keys first
    self._pool_id = kwds.pop('pool_id')
    self._logobj = kwds.pop('logobj')
    self.kwds = kwds

    # make sure max_connections is valid
    is_valid = isinstance(max_connections, int) and \
        max_connections > 0
    if not is_valid:
        raise ValueError('max_connections must be a positive int')

    # if the process id changes, we close all connections
    # and reinstantiate this object
    self._pid = os.getpid()

    # define the pool and fill it with None placeholders
    self._pool = LifoQueue(max_connections)
    while True:
        try:
            self._pool.put_nowait(None)
        except Full:
            break

    # open connections
    self._connections = []
Example 13: __init__
def __init__(self, canvas):
    self.providers = {'Bing': self.bing_setup, 'ArcGIS': self.arcgis_setup, 'MapQuest': self.mq_setup}
    self.canvas = canvas
    self.imageryprovider = None
    self.provider_base = None
    self.provider_url = None
    self.provider_logo = None  # (filename, width, height)
    self.provider_levelmin = self.provider_levelmax = 0
    self.placementcache = {}  # previously created placements (or None if the image couldn't be loaded), indexed by quadkey.
    # A placement may not be laid out yet if its image is still being fetched.
    self.tile = (0, 999)  # X-Plane 1x1 degree tile - [lat, lon] of SW corner
    self.loc = None
    self.dist = 0

    self.filecache = Filecache()

    # Set up a pool of worker threads
    self.workers = []
    self.q = LifoQueue()
    for i in range(Imagery.connections):
        t = threading.Thread(target=self.worker)
        t.daemon = True  # this doesn't appear to work for threads blocked on a Queue
        t.start()
        self.workers.append(t)
Example 14: __init__
def __init__(self, download_folder):
    self.download_folder = download_folder
    self.log = logging.getLogger('Ydl')
    self.processes = LifoQueue()
    self.watcher = threading.Thread(target=self._cleanup, args=(self.processes, self.log))
    self.log.info('Starting download watcher process')
    self.watcher.start()
Example 15: PooledEnqueue
class PooledEnqueue(object):
    def __init__(self, qdir, n=5, maxsize=1000*1000*1000, **qargs):
        maxsize = maxsize / n
        self.qdir = qdir
        self.write_executor = ThreadPoolExecutor(poolsize=1, queuesize=100)
        self.queues = [FileEnqueue(self.qdir, suffix=str(i),
                                   maxsize=maxsize,
                                   executor=self.write_executor,
                                   **qargs)
                       for i in range(n)]
        self.avail = LifoQueue()
        for q in self.queues:
            self.avail.put(q)
        self.addedcount = 0

    def get_status(self):
        qq = [q.get_status() for q in self.queues]
        r = dict(
            buffered=sum(s['buffered'] for s in qq),
            pending=sum(s['pending'] for s in qq),
            queues=qq)
        return r

    def _flush(self):
        for q in self.queues:
            q._flush()

    def close(self):
        for q in self.queues:
            q.close()
        self.write_executor.shutdown()

    def queue(self, curis):
        t0 = time.time()
        enq = self.avail.get()
        t = time.time() - t0
        if t > 0.1:
            logging.warn('self.avail.get() %.4f', t)
        try:
            enq.queue(curis)
            self.addedcount += len(curis)
        finally:
            t0 = time.time()
            self.avail.put(enq)
            t = time.time() - t0
            if t > 0.1:
                logging.warn('slow self.avail.put() %.4f', t)