当前位置: 首页>>代码示例>>Python>>正文


Python queues.Empty方法代码示例

本文整理汇总了Python中multiprocessing.queues.Empty方法的典型用法代码示例。如果您正苦于以下问题:Python queues.Empty方法的具体用法?Python queues.Empty怎么用?Python queues.Empty使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在multiprocessing.queues的用法示例。


在下文中一共展示了queues.Empty方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __call__

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def __call__(self, *args, **kwargs):
    """Worker-process entry point.

    Routes this process's log records back to the parent through the
    output queue, opens the database, signals readiness, then loops
    forever: run incoming tasks, emit a heartbeat whenever the inbox
    stays quiet for ``HEARTBEAT_INTERVAL`` seconds, report task failures
    through ``self._error``, and exit on Ctrl-C.
    """
    inbox = self.queue_in
    outbox = self.queue_out

    root = logging.getLogger()
    root.addHandler(QueueHandler(outbox))
    root.setLevel(logging.DEBUG if self._debug else logging.INFO)

    db.init(self._settings['db_path'], False)

    self._ready()

    beat = 1
    while True:
        try:
            item = inbox.get(timeout=HEARTBEAT_INTERVAL)
            if isinstance(item, tasks.Task):
                self._work(str(item))
                self._done(item(**self._settings))
        except queues.Empty:
            # Inbox idle for a full interval: report liveness instead.
            self._heartbeat(beat)
            beat += 1
        except KeyboardInterrupt:
            break
        except Exception as exc:
            self._error(exc, traceback.format_exc())
开发者ID:tabris17,项目名称:doufen,代码行数:26,代码来源:worker.py

示例2: do_work

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def do_work(job_queue, counter=None):
    """Worker-process entry point: drain fetch-page jobs from the queue.

    Pulls jobs from ``job_queue`` until it is empty, fetching one result
    page per job via ``fetch_result_page``.  A failed job is re-queued up
    to ``job['max_retries']`` times.  ``counter`` is a shared completed-page
    counter (``multiprocessing.Value``-like); it is only touched after a
    successful fetch.

    SIGINT is ignored so the parent process can handle Ctrl-C and shut the
    worker pool down cleanly.
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while not job_queue.empty():
        # Reset on every iteration: if get_nowait() fails with something
        # other than Empty, the except-Exception branch below must not see
        # a stale job from a previous iteration (or, on the first pass, an
        # unbound name — the original raised NameError there).
        job = None
        try:
            job = job_queue.get_nowait()
            fetch_result_page(job)

            num_done = 0
            with counter.get_lock():
                counter.value += 1
                num_done = counter.value

            logging.info('{0} page(s) of {1} finished'.format(num_done,
                                                              job['num_pages']))
        except Empty:
            # Another worker drained the queue between empty() and get_nowait().
            pass

        except KeyboardInterrupt:
            break

        except Exception:
            if not job:
                raise

            retries = job.get('retries', 0)
            if retries < job['max_retries']:
                logging.error('Retrying Page {0}'.format(job['page']))
                job['retries'] = retries + 1
                job_queue.put_nowait(job)
            else:
                logging.error('Max retries exceeded for page {0}'.
                              format(job['page']))
开发者ID:ikreymer,项目名称:cdx-index-client,代码行数:37,代码来源:cdx-index-client.py

示例3: __iter__

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def __iter__(self) -> tp.Iterator[T]:
    """Yield items from the queue until every producer has finished.

    Propagates any exception reported by a worker process, and decrements
    ``namespace.remaining`` whenever a ``Done`` sentinel is received.
    """
    while not self.is_done():

        if self.namespace.exception:
            exception, trace = self.exception_queue.get()

            try:
                # Re-raise the worker's exception type with the remote
                # traceback text appended to its message.
                exception = exception(f"\n\n{trace}")
            except Exception:
                # Fix: was a bare ``except:``, which would also swallow
                # KeyboardInterrupt/SystemExit.  Some exception classes
                # cannot be constructed from a single string argument;
                # fall back to a plain Exception carrying the details.
                exception = Exception(f"\n\nOriginal: {exception}\n\n{trace}")

            raise exception

        try:
            x = self.get(timeout=pypeln_utils.TIMEOUT)
        except Empty:
            # Timed out waiting for an item; re-check done/exception state.
            continue

        if isinstance(x, pypeln_utils.Done):
            # One producer stage finished.
            with self.lock:
                self.namespace.remaining -= 1

            continue

        yield x
开发者ID:cgarciae,项目名称:pypeln,代码行数:28,代码来源:queue.py

示例4: next

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def next(self, timeout=None):
        """Return the next completed result item from the pool.

        Tries a non-blocking get first; if the item queue is empty, waits on
        the condition variable for up to ``timeout`` seconds and then tries
        one blocking get.

        Raises:
            StopIteration: all ``self._length`` items have been consumed.
            TimeoutError: the wait expired with no new item available
                (presumably ``multiprocessing.TimeoutError`` imported at the
                top of the file — TODO confirm).
            Exception: the stored exception, when a worker reported failure.
        """
        self._cond.acquire()
        try:
            try:
                # Fast path: an item is already buffered.
                item = self._items.get_nowait()
                self._empty_sema.release()
            except Empty:
                # Nothing buffered.  If every item has already been handed
                # out, iteration is complete.
                if self._index == self._length:
                    raise StopIteration
                # Wait for a producer to signal, then try once more with a
                # blocking get bounded by the same timeout.
                self._cond.wait(timeout)
                try:
                    item = self._items.get(timeout=timeout)
                    self._empty_sema.release()
                except Empty:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        # Each item is a (success, value) pair: return the value on success,
        # otherwise re-raise the recorded worker exception.
        success, value = item
        if success:
            return value
        raise value
开发者ID:CharlesShang,项目名称:Detectron-PYTORCH,代码行数:37,代码来源:pool.py

示例5: _watch_worker

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def _watch_worker(self):
        """Monitor the worker output queue.

        Coroutine loop (note the ``yield tornado.gen.sleep``): polls the
        workers' shared output queue without blocking and dispatches each
        message by type, broadcasting worker state changes to connected
        clients as JSON.
        """
        while True:
            try:
                ret = self._worker_output.get_nowait()
                if isinstance(ret, logging.LogRecord):
                    # Log record forwarded from a worker process: re-emit it
                    # locally and mirror it to clients.
                    logging.root.handle(ret)
                    self.application.broadcast(json.dumps({
                        'sender': 'logger',
                        'message': ret.getMessage(),
                        'level': ret.levelname,
                    }))
                elif isinstance(ret, Worker.ReturnReady):
                    # Worker finished start-up; try to hand it a task.
                    logging.info('"{0}" is ready'.format(ret.name))
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'ready',
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnDone):
                    # Worker completed its task; clear it and schedule more.
                    logging.info('"{0}" has done'.format(ret.name))
                    self._workers[ret.name].toggle_task()
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'done',
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnWorking):
                    # Worker picked up a task; record what it is working on.
                    logging.info('"{0}" is working for "{1}"'.format(ret.name, ret.task))
                    self._workers[ret.name].toggle_task(ret.task)
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'working',
                        'target': str(ret.task),
                    }))
                elif isinstance(ret, Worker.ReturnError):
                    # Worker task failed; clear the task and schedule the next.
                    logging.error('"{0}" error: {1}\n{2}'.format(ret.name, ret.exception, ret.traceback))
                    self._workers[ret.name].toggle_task()
                    self.application.broadcast(json.dumps({
                        'sender': 'worker',
                        'src': ret.name,
                        'event': 'error',
                        'message': str(ret.exception),
                    }))
                    self._launch_task()
                elif isinstance(ret, Worker.ReturnHeartbeat):
                    logging.info('"{0}" heartbeat:{1}'.format(ret.name, ret.sequence))
            except queues.Empty:
                pass
            # Poll the queue every 0.1 seconds.
            yield tornado.gen.sleep(0.1)
开发者ID:tabris17,项目名称:doufen,代码行数:58,代码来源:server.py

示例6: read

# 需要导入模块: from multiprocessing import queues [as 别名]
# 或者: from multiprocessing.queues import Empty [as 别名]
def read(self, size=-1):
    """Read up to ``size`` bytes from the queue-backed stream.

    ``size == -1`` means read everything available.  While the stream is
    open and not finished, blocks on the underlying queue (bounded by
    ``self._timeout``) until enough data is buffered.  A queue-reported
    exception or a timeout closes the stream; it is passed to registered
    exception handlers, or raised locally when none are registered.
    """
    def drain(n):
        # Slice n bytes (or everything for n == -1) off the buffer front.
        if n == -1:
            out, self._buffer = self._buffer, b""
        else:
            out, self._buffer = self._buffer[0:n], self._buffer[n:]
        return out

    # Closed or finished stream: just serve whatever is buffered.
    if self._closed or self._done:
        return drain(size)

    # Accumulate until the requested size is buffered (forever for -1).
    while size == -1 or len(self._buffer) < size:
        exception = None
        try:
            result = self._queue.get(block=True, timeout=self._timeout)
            exception = result.exception
        except Empty as em:
            # A timeout is treated like a queue-reported failure.
            exception = em

        if exception is not None:
            self._closed = True
            self.raised_exception = True

            # Hand the exception to registered handlers; raise locally
            # only when nobody is registered.
            handled = False
            for handler in self._exception_handlers:
                handler(exception)
                handled = True

            if not handled:
                raise exception
            return b""

        # A None payload is the producer's end-of-stream sentinel: stop
        # accumulating and return what we already have.
        if result.data is None:
            self._done = True
            break

        self._buffer += result.data
        self._total_size += len(result.data)

    return drain(size)
开发者ID:quay,项目名称:quay,代码行数:59,代码来源:queuefile.py


注:本文中的multiprocessing.queues.Empty方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。