Python queues.Empty Code Examples

This article collects typical usage examples of multiprocessing.queues.Empty in Python. Empty is the exception that Queue.get() and Queue.get_nowait() raise when no item is available before the timeout; multiprocessing.queues re-exports it from the standard queue module. If you are unsure how queues.Empty is used, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples from the enclosing multiprocessing.queues module.


The following presents 6 code examples of queues.Empty drawn from open-source projects, sorted by popularity by default.
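
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic pattern: call get() with a timeout on a multiprocessing.Queue and catch Empty when nothing arrives in time. Because multiprocessing.queues re-exports Empty from the standard queue module, catching queue.Empty is equivalent. The function name drain and the 0.5-second timeout are illustrative choices, not part of any project's API.

import multiprocessing
from multiprocessing.queues import Empty  # the same class as queue.Empty


def drain(q, timeout=0.5):
    """Consume items until the queue stays empty for `timeout` seconds."""
    while True:
        try:
            item = q.get(timeout=timeout)  # block for at most `timeout` seconds
        except Empty:
            # get() timed out with nothing to return; stop here. The examples
            # below instead use this branch for heartbeats, retries, or polling.
            return
        print('got', item)


if __name__ == '__main__':
    q = multiprocessing.Queue()
    for i in range(3):
        q.put(i)
    drain(q)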

Example 1: __call__

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def __call__(self, *args, **kwargs):
        queue_in = self.queue_in
        queue_out = self.queue_out
        logger = logging.getLogger()
        logger.addHandler(QueueHandler(queue_out))
        logger.setLevel(logging.DEBUG if self._debug else logging.INFO)
        db.init(self._settings['db_path'], False)

        self._ready()

        heartbeat_sequence = 1
        while True:
            try:
                # Wait for a task, but no longer than the heartbeat interval.
                task = queue_in.get(timeout=HEARTBEAT_INTERVAL)
                if isinstance(task, tasks.Task):
                    self._work(str(task))
                    self._done(task(**self._settings))
            except queues.Empty:
                # No task arrived in time; emit a heartbeat instead.
                self._heartbeat(heartbeat_sequence)
                heartbeat_sequence += 1
            except Exception as e:
                self._error(e, traceback.format_exc())
            except KeyboardInterrupt:
                break 
Developer: tabris17, Project: doufen, Lines: 26, Source: worker.py

Example 2: do_work

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def do_work(job_queue, counter=None):
    """Worker process function: keep pulling page-fetch jobs from the
    queue until all jobs are finished.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        job = None  # guard against NameError in the handlers below if get_nowait() fails
        try:
            job = job_queue.get_nowait()
            fetch_result_page(job)

            num_done = 0
            with counter.get_lock():
                counter.value += 1
                num_done = counter.value

            logging.info('{0} page(s) of {1} finished'.format(num_done,
                                                              job['num_pages']))
        except Empty:
            pass

        except KeyboardInterrupt:
            break

        except Exception:
            if not job:
                raise

            retries = job.get('retries', 0)
            if retries < job['max_retries']:
                logging.error('Retrying Page {0}'.format(job['page']))
                job['retries'] = retries + 1
                job_queue.put_nowait(job)
            else:
                logging.error('Max retries exceeded for page {0}'.
                              format(job['page'])) 
Developer: ikreymer, Project: cdx-index-client, Lines: 37, Source: cdx-index-client.py

Example 3: __iter__

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def __iter__(self) -> tp.Iterator[T]:

        while not self.is_done():

            if self.namespace.exception:
                # A worker raised: re-raise it here together with its remote traceback.
                exception, trace = self.exception_queue.get()

                try:
                    exception = exception(f"\n\n{trace}")
                except:
                    exception = Exception(f"\n\nOriginal: {exception}\n\n{trace}")

                raise exception

            try:
                x = self.get(timeout=pypeln_utils.TIMEOUT)
            except Empty:
                # Nothing available yet; loop back and re-check the termination conditions.
                continue

            if isinstance(x, pypeln_utils.Done):
                with self.lock:
                    self.namespace.remaining -= 1

                continue

            yield x 
Developer: cgarciae, Project: pypeln, Lines: 28, Source: queue.py

Example 4: next

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def next(self, timeout=None):
        # with self._get_lock:
        #     if self._get_index == self._length:
        #         raise StopIteration
        #     item = self._items.get(timeout=timeout)
        #     self._get_index += 1
        #
        #     success, value = item
        #     if success:
        #         return value
        #     raise value

        self._cond.acquire()
        try:
            try:
                # First try a non-blocking get.
                item = self._items.get_nowait()
                self._empty_sema.release()
            except Empty:
                if self._index == self._length:
                    raise StopIteration
                # Nothing ready yet: wait for a producer to signal, then retry with a timeout.
                self._cond.wait(timeout)
                try:
                    item = self._items.get(timeout=timeout)
                    self._empty_sema.release()
                except Empty:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value 
Developer: CharlesShang, Project: Detectron-PYTORCH, Lines: 37, Source: pool.py

Example 5: _watch_worker

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def _watch_worker(self):
        """
        監控工作隊列
        """
        while True:
            try:
                ret = self._worker_output.get_nowait()
                if isinstance(ret, logging.LogRecord):
                    logging.root.handle(ret)
                    self.application.broadcast(json.dumps({
                        'sender': 'logger',
                        'message': ret.getMessage(),
                        'level': ret.levelname,
                    }))
                elif isinstance(ret, Worker.ReturnReady):
                    logging.info('"{0}" is ready'.format(ret.name))
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'ready',
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnDone):
                    logging.info('"{0}" has done'.format(ret.name))
                    self._workers[ret.name].toggle_task()
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'done',
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnWorking):
                    logging.info('"{0}" is working for "{1}"'.format(ret.name, ret.task))
                    self._workers[ret.name].toggle_task(ret.task)
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'working',
                        'target': str(ret.task),
                    }))
                elif isinstance(ret, Worker.ReturnError):
                    logging.error('"{0}" error: {1}\n{2}'.format(ret.name, ret.exception, ret.traceback))
                    self._workers[ret.name].toggle_task()
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'error',
                        'message': str(ret.exception),
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnHeartbeat):
                    logging.info('"{0}" heartbeat:{1}'.format(ret.name, ret.sequence))
            except queues.Empty:
                pass
            # Poll the queue again after 0.1 seconds.
            yield tornado.gen.sleep(0.1)
Developer: tabris17, Project: doufen, Lines: 58, Source: server.py

Example 6: read

# Required import: from multiprocessing import queues [as alias]
# Or: from multiprocessing.queues import Empty [as alias]
def read(self, size=-1):
        # If the queuefile was closed or we have finished, send back any remaining data.
        if self._closed or self._done:
            if size == -1:
                buf = self._buffer
                self._buffer = b""
                return buf

            buf = self._buffer[0:size]
            self._buffer = self._buffer[size:]
            return buf

        # Loop until we reach the requested data size (or forever if all data was requested).
        while (len(self._buffer) < size) or (size == -1):
            exception = None
            try:
                result = self._queue.get(block=True, timeout=self._timeout)
                exception = result.exception
            except Empty as em:
                exception = em

            # Check for any exceptions raised by the queue process.
            if exception is not None:
                self._closed = True
                self.raised_exception = True

                # Fire off the exception to any registered handlers. If no handlers were registered,
                # then raise the exception locally.
                handled = False
                for handler in self._exception_handlers:
                    handler(exception)
                    handled = True

                if handled:
                    return b""
                else:
                    raise exception

            # Check for no further data. If the QueueProcess has finished producing data, then break
            # out of the loop to return the data already acquired.
            if result.data is None:
                self._done = True
                break

            # Add the data to the buffer.
            self._buffer += result.data
            self._total_size += len(result.data)

        # Return the requested slice of the buffer.
        if size == -1:
            buf = self._buffer
            self._buffer = b""
            return buf

        buf = self._buffer[0:size]
        self._buffer = self._buffer[size:]
        return buf 
Developer: quay, Project: quay, Lines: 59, Source: queuefile.py


Note: The multiprocessing.queues.Empty examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; the source code remains copyrighted by its original authors. For distribution and use, please follow the corresponding project's license. Do not republish without permission.