This article collects typical usage examples of Python's queue.qsize method, i.e. the qsize() method called on Queue objects. If you are wondering what queue.qsize does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the queue class that this method belongs to.
Twelve code examples of queue.qsize are shown below, sorted by popularity by default. You can vote up the examples you like or find useful; your feedback helps the system recommend better Python code examples.
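Before the project examples, here is a minimal, self-contained sketch of the method itself (not taken from any of the projects below): qsize() reports the approximate number of items currently in a Queue, and the count can change the moment another thread puts or gets an item.

import queue

q = queue.Queue(maxsize=10)
q.put("job-1")
q.put("job-2")
print(q.qsize())   # 2 -- approximate; another thread could change it immediately
print(q.full())    # False, the queue is bounded at 10 items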
Example 1: grab
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def grab(cam, queue, width, height, fps):
    # cv2, config, get_blur and the global `running` flag are defined elsewhere in the original project
    global running
    capture = cv2.VideoCapture(cam)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    capture.set(cv2.CAP_PROP_FPS, fps)
    while running:
        frame = {}
        capture.grab()
        retval, img = capture.retrieve(0)
        frame["img"] = img
        frame["1"] = config["1"]
        frame["2"] = config["2"]
        blur = get_blur(img, 0.05)
        frame["blur"] = blur
        if queue.qsize() < 10:
            # keep at most 10 pending frames; otherwise drop the frame and report the backlog
            queue.put(frame)
        else:
            print(queue.qsize())
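A plausible way to launch this grabber (hypothetical wiring; the original script's main block is not shown on this page) is to hand it a Queue and run it in a background thread:

import queue
import threading

running = True                     # hypothetical global flag checked by grab()
frame_queue = queue.Queue()
grabber = threading.Thread(target=grab, args=(0, frame_queue, 640, 480, 30), daemon=True)
grabber.start()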
Example 2: producer
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def producer(pool, queue, submap_ft, refmap_ft, fname, particles,
             sx, sy, s, a, apix, coefs_method, r, nr, fftthreads=1, crop=None, pfac=2):
    # mrc, star and subtract_outer come from the surrounding project
    log = logging.getLogger('root')
    log.debug("Producing %s" % fname)
    zreader = mrc.ZSliceReader(particles[star.UCSF.IMAGE_ORIGINAL_PATH].iloc[0])
    for i, ptcl in particles.iterrows():
        log.debug("Produce %d@%s" % (ptcl[star.UCSF.IMAGE_ORIGINAL_INDEX], ptcl[star.UCSF.IMAGE_ORIGINAL_PATH]))
        # p1r = mrc.read_imgs(stack[i], idx[i] - 1, compat="relion")
        p1r = zreader.read(ptcl[star.UCSF.IMAGE_ORIGINAL_INDEX])
        log.debug("Apply")
        ri = pool.apply_async(
            subtract_outer,
            (p1r, ptcl, submap_ft, refmap_ft, sx, sy, s, a, apix, coefs_method, r, nr),
            {"fftthreads": fftthreads, "crop": crop, "pfac": pfac})
        log.debug("Put")
        queue.put((ptcl[star.UCSF.IMAGE_INDEX], ri), block=True)
        log.debug("Queue for %s is size %d" % (ptcl[star.UCSF.IMAGE_ORIGINAL_PATH], queue.qsize()))
    zreader.close()
    log.debug("Put poison pill")
    queue.put((-1, None), block=True)
Example 3: consumer
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def consumer(queue, stack, apix=1.0, iothreads=None):
    log = logging.getLogger('root')
    with mrc.ZSliceWriter(stack, psz=apix) as zwriter:
        while True:
            log.debug("Get")
            i, ri = queue.get(block=True)
            log.debug("Got %d, queue for %s is size %d" %
                      (i, stack, queue.qsize()))
            if i == -1:
                break
            new_image = ri.get()
            log.debug("Result for %d was shape (%d,%d)" %
                      (i, new_image.shape[0], new_image.shape[1]))
            zwriter.write(new_image)
            queue.task_done()
            log.debug("Wrote %d to %d@%s" % (i, zwriter.i, stack))
    if iothreads is not None:
        iothreads.release()
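Examples 2 and 3 use the classic poison-pill handshake: the producer pushes a sentinel tuple (-1, None) when it is done, and the consumer breaks out of its loop as soon as it sees it. Stripped of the project-specific I/O, the pattern looks roughly like this (a generic sketch, not the original wiring):

import queue
import threading

q = queue.Queue(maxsize=8)           # bounded, so the producer blocks instead of growing without limit

def worker():
    while True:
        i, item = q.get(block=True)
        if i == -1:                  # poison pill: nothing more will arrive
            break
        # ... process item ...
        q.task_done()

threading.Thread(target=worker, daemon=True).start()
for n in range(3):
    q.put((n, "payload"))
q.put((-1, None))                    # tell the worker to stop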
Example 4: myPublisher
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def myPublisher(queue):
    # requires `import threading` and `import time`; keeps publishing until the bounded queue reports full
    while not queue.full():
        queue.put(1)
        print("{} Appended 1 to queue: {}".format(threading.current_thread(), queue.qsize()))
        time.sleep(1)
Example 5: mySubscriber
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def mySubscriber(queue):
    while True:
        item = queue.get()
        if item is None:             # None is the shutdown sentinel
            break
        print("{} removed {} from the queue".format(threading.current_thread(), item))
        print("Queue Size is now: {}".format(queue.qsize()))
        queue.task_done()
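One plausible way to wire Examples 4 and 5 together (an assumed main block, not reproduced from the original tutorial) is a bounded queue, one publisher thread, and a couple of subscriber threads that each receive a None sentinel at shutdown:

import queue
import threading

q = queue.Queue(maxsize=5)           # bounded, so queue.full() eventually becomes True
publisher = threading.Thread(target=myPublisher, args=(q,))
subscribers = [threading.Thread(target=mySubscriber, args=(q,)) for _ in range(2)]

publisher.start()
publisher.join()                     # publisher stops once the queue reports full
for s in subscribers:
    s.start()
for _ in subscribers:
    q.put(None)                      # one sentinel per subscriber so every loop exits
for s in subscribers:
    s.join()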
Example 6: _log_process_queue_event
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def _log_process_queue_event(queue, event):
    """Log a process queue event."""
    operation = event.get("operation", "unknown")
    provider = event.get("provider")
    name = provider.name if provider else "unknown"
    LOG.info(f"Adding operation {operation} for {name} to process queue (size: {queue.qsize()})")
Example 7: __del__
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def __del__(self):
    self.__threadrun = False
    # clear the queue
    while self._queue.qsize() > 0:
        self._queue.get_nowait()
Example 8: remove_client
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def remove_client(self, clientID):
    txThread = self._thread.pop(clientID)
    txThread.stop()
    # drain and discard the client's receive and transmit queues
    queue = self._rxqueue.pop(clientID)
    while queue.qsize() > 0:
        queue.get_nowait()
    queue = self._txqueue.pop(clientID)
    while queue.qsize() > 0:
        queue.get_nowait()
    self._logger.info("Client-ID:{0}; removed; number of clients:{1}".format(clientID, self._clientcounter))
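qsize() is only a snapshot, so a drain loop like the ones in Examples 7 and 8 can race with another consumer and call get_nowait() on an already empty queue. A common defensive variant (a generic sketch, not from either project) relies on the queue.Empty exception instead:

import queue

def drain(q):
    # empty the queue without trusting qsize(); get_nowait() raises queue.Empty when nothing is left
    while True:
        try:
            q.get_nowait()
        except queue.Empty:
            break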
Example 9: flush
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def flush(self):
    """Forces a flush from the internal queue to the server"""
    queue = self.queue
    size = queue.qsize()
    queue.join()
    self.log.debug('successfully flushed {0} items.'.format(size))
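Note that queue.join() in Example 9 only returns once a worker has called task_done() for every item it dequeued; qsize() is read first purely for the log message. The underlying contract, in miniature (a generic sketch, not the library's worker code):

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        # ... send item to the server ...
        q.task_done()                # each get() must be matched by a task_done()

threading.Thread(target=worker, daemon=True).start()
for n in range(3):
    q.put(n)
q.join()                             # blocks until all three items are marked done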
Example 10: enqueue_data
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def enqueue_data(queue, capacity):
    np.random.seed()
    gen = datagen()
    while True:
        try:
            data = gen.gen_srnet_data_with_background()
        except Exception:
            continue                 # skip failed generations instead of re-enqueueing stale data
        if queue.qsize() < capacity:
            queue.put(data)
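Polling qsize() against a capacity also quietly drops any sample generated while the queue is full. If dropping is not intended, a bounded queue with a blocking put() provides the same back-pressure for free (a variant sketch under that assumption, not the original project code):

import queue

def enqueue_data_bounded(q, gen):
    # let a bounded Queue (e.g. queue.Queue(maxsize=capacity)) provide the back-pressure
    while True:
        try:
            data = gen.gen_srnet_data_with_background()
        except Exception:
            continue
        q.put(data)                  # blocks while the queue is full instead of discarding data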
Example 11: get_queue_size
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def get_queue_size(self):
    # expose the (approximate) number of queued items
    return self.queue.qsize()
Example 12: dequeue_batch
# Required module: import queue [as alias]
# Or: from queue import qsize [as alias]
def dequeue_batch(self, batch_size, data_shape):
    # busy-wait until enough samples have been queued
    while self.queue.qsize() < batch_size:
        pass
    i_t_batch, i_s_batch = [], []
    t_sk_batch, t_t_batch, t_b_batch, t_f_batch = [], [], [], []
    mask_t_batch = []
    for i in range(batch_size):
        i_t, i_s, t_sk, t_t, t_b, t_f, mask_t = self.dequeue_data()
        i_t_batch.append(i_t)
        i_s_batch.append(i_s)
        t_sk_batch.append(t_sk)
        t_t_batch.append(t_t)
        t_b_batch.append(t_b)
        t_f_batch.append(t_f)
        mask_t_batch.append(mask_t)
    # choose a common target size: fixed height, width averaged over the batch and rounded to a multiple of 8
    w_sum = 0
    for t_b in t_b_batch:
        h, w = t_b.shape[:2]
        scale_ratio = data_shape[0] / h
        w_sum += int(w * scale_ratio)
    to_h = data_shape[0]
    to_w = w_sum // batch_size
    to_w = int(round(to_w / 8)) * 8
    to_size = (to_w, to_h)  # w first for cv2
    for i in range(batch_size):
        i_t_batch[i] = cv2.resize(i_t_batch[i], to_size)
        i_s_batch[i] = cv2.resize(i_s_batch[i], to_size)
        t_sk_batch[i] = cv2.resize(t_sk_batch[i], to_size, interpolation=cv2.INTER_NEAREST)
        t_t_batch[i] = cv2.resize(t_t_batch[i], to_size)
        t_b_batch[i] = cv2.resize(t_b_batch[i], to_size)
        t_f_batch[i] = cv2.resize(t_f_batch[i], to_size)
        mask_t_batch[i] = cv2.resize(mask_t_batch[i], to_size, interpolation=cv2.INTER_NEAREST)
        # eliminate the effect of resize on t_sk
        t_sk_batch[i] = skeletonization.skeletonization(mask_t_batch[i], 127)
    # stack into arrays and normalize to the ranges the network expects
    i_t_batch = np.stack(i_t_batch)
    i_s_batch = np.stack(i_s_batch)
    t_sk_batch = np.expand_dims(np.stack(t_sk_batch), axis=-1)
    t_t_batch = np.stack(t_t_batch)
    t_b_batch = np.stack(t_b_batch)
    t_f_batch = np.stack(t_f_batch)
    mask_t_batch = np.expand_dims(np.stack(mask_t_batch), axis=-1)
    i_t_batch = i_t_batch.astype(np.float32) / 127.5 - 1.
    i_s_batch = i_s_batch.astype(np.float32) / 127.5 - 1.
    t_sk_batch = t_sk_batch.astype(np.float32) / 255.
    t_t_batch = t_t_batch.astype(np.float32) / 127.5 - 1.
    t_b_batch = t_b_batch.astype(np.float32) / 127.5 - 1.
    t_f_batch = t_f_batch.astype(np.float32) / 127.5 - 1.
    mask_t_batch = mask_t_batch.astype(np.float32) / 255.
    return [i_t_batch, i_s_batch, t_sk_batch, t_t_batch, t_b_batch, t_f_batch, mask_t_batch]
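The bare while ... pass loop at the top of Example 12 spins a CPU core at 100% while it waits. A common mitigation (a hypothetical helper, not part of the original code) is to sleep briefly between qsize() polls:

import time

def wait_for_batch(q, batch_size, poll_interval=0.01):
    # poll qsize() without pegging a CPU core
    while q.qsize() < batch_size:
        time.sleep(poll_interval)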