

Python Queue.put_nowait Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.queues.Queue.put_nowait. If you are wondering what Queue.put_nowait does, how to call it, or what real-world code that uses it looks like, the selected examples below may help. You can also look further into other usage examples of the containing class, multiprocessing.queues.Queue.


Four code examples of Queue.put_nowait are shown below, sorted by popularity by default.
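
Before the collected examples, a minimal sketch of the method's semantics may help (this sketch is ours, not taken from the projects below): put_nowait(obj) is equivalent to put(obj, block=False) and raises queue.Full as soon as a bounded queue already holds maxsize items.

import multiprocessing
import queue  # queue.Full is the exception raised by a failed non-blocking put

q = multiprocessing.Queue(maxsize=1)  # public factory; returns a multiprocessing.queues.Queue instance
q.put_nowait('first')                 # succeeds; the queue is now full
try:
    q.put_nowait('second')            # the queue already holds maxsize items, so queue.Full is raised
except queue.Full:
    print('queue is full, dropping the item')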

Example 1: main

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put_nowait [as alias]
def main():
    proxy_queue = Queue()
    proxy_hosts = Queue()

    create_db()
    # query the proxy URLs from the database
    DB_CONN = get_conn()
    c = DB_CONN.cursor()
    LazyFW.log(r'''SELECT count(*) as `cnt` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    c.execute(r'''SELECT count(*) as `cnt` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    proxys = c.fetchone()
    c.close()
    if proxys[0] < 10:
        proxy_urls = get_proxy_urls()
        for url in proxy_urls:
            proxy_queue.put_nowait(url)

        workers = []
        for i in range(PROXY_THREAD_FETCH_MAX):
            p = Process(target=worker, args=('fetch_proxy', proxy_queue))
            p.daemon = True
            p.start()
            workers.append(p)

        for p in workers:
            p.join()
    DB_CONN.commit()
    DB_CONN.close()

    # query the data again
    DB_CONN = get_conn()
    LazyFW.log(r'''SELECT `host`,`port` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    c = DB_CONN.cursor()
    c.execute(r'''SELECT `host`,`port` FROM `proxys_%s` where `speed` > %d;''' % (CURR_DATE, PROXY_TIMEOUT, ))
    for row in c.fetchall():
        proxy_hosts.put_nowait(row)

    c.close()
    DB_CONN.commit()
    DB_CONN.close()

    workers = []
    for i in range(PROXY_THREAD_TEST_PROXY_MAX):
        p = Process(target=worker, args=('proxy_test', proxy_hosts))
        p.daemon = True
        p.start()
        workers.append(p)

    for p in workers:
        p.join()
Developer: emilymwang8, Project: fang-broker, Lines of code: 52, Source: new_proxy.py

Example 2: Queue

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put_nowait [as alias]
from multiprocessing.queues import Queue, context  # 'context' is reachable here only because queues.py imports it internally
q = Queue(2, ctx=context._default_context)  # bounded queue, maxsize=2, built on the default context
q.put_nowait(None)
q.put_nowait(None)
q.put_nowait(None)  # the queue is already full, so this call raises queue.Full
Developer: podhmo, Project: individual-sandbox, Lines of code: 7, Source: 02queue.py
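
For comparison (our own note, not part of the project above), the same behaviour can be reproduced through the public multiprocessing API, which binds the queue to the default context for you instead of importing the private context module:

import multiprocessing
import queue

q = multiprocessing.Queue(2)  # bound to the default context internally
q.put_nowait(None)
q.put_nowait(None)
try:
    q.put_nowait(None)        # a third item does not fit into a maxsize=2 queue
except queue.Full:
    pass                      # same exception as in the snippet above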

Example 3: Client

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put_nowait [as alias]

#.........part of the code is omitted here.........
            if self.directory_observer.is_alive():
                self.directory_observer.stop()

    def on_any_event(self, event):
        """
        Listens to an event passed by 'watchdog' and checks the current
        master/slave status

        :param event: A :class:`~watchdog.events.FileSystemEvent`
        object passed by 'watchdog' indicating an event change within the
        specified directory.
        """
        file_name = event.src_path.split('/')[-1]
        if file_name in self._failover_files:
            self.execute_role_based_procedure()

    def execute_role_based_procedure(self):
        """
        Starts or stops components based on the role (Master/Slave) of the
        Postgres host.

        Implements a `binary exponential backoff
        <http://en.wikipedia.org/wiki/Exponential_backoff
        #Binary_exponential_backoff_.2F_truncated_exponential_backoff>`_
        up to 32 seconds if it encounters a FATAL connection error.
        """
        backoff = 0
        while True:
            try:
                server_is_master = self.master_pg_conn.is_server_master()
                if server_is_master:
                    self.log.warning('Server is a master, starting components')
                    self._start_components(restart=True)
                else:
                    self.log.warning('Server is a slave, stopping components')
                    self._stop_components()
                break
            except OperationalError:
                self._stop_components()

                self.log.warning(
                    'Cannot connect to the DB, maybe it has been shutdown?',
                    exc_info=True
                )

                if backoff:  # pragma: no cover
                    backoff <<= 1
                    if backoff > 32:
                        backoff = 1
                else:
                    backoff = 1
                sleep(backoff)

    def _handle_sigchld(self, sig, frame):
        """
        A child process dying, and the client not shutting down, indicates
        a process has been shut down by some external caller.

        We must check both the processor and listener for 'liveness' and
        start those which have failed.
        """
        if sig == SIGCHLD and self._should_run and not self._exception_raised:
            try:
                expected, action = self._processor.error_queue.get_nowait()
                self._exception_raised = True
                if expected:
                    if action == TERMINATE:
                        self.execute_role_based_procedure()
                else:
                    self.log.critical(
                        'An unexpected error was raised - shutting down'
                    )
                    self._shutdown()
            except Empty:
                self._child_interrupted = True
                self._start_components(restart=True)

    def _handle_terminate(self, sig, frame):
        """
        Handles SIGINT and SIGTERM signals.

        If called from another process then puts to the exit queue, else
        calls _shutdown.
        """
        if self.ident != os.getpid():
            self._exit_queue.put_nowait(True)
        else:
            self._shutdown()

    def _shutdown(self):
        """
        Shuts down the Client:
            * Sets '_should_run' to False.
            * Stops the components.
            * Stops the observer.
        """
        self.log.warning('Shutting down...')
        self._should_run = False
        self._stop_components()
        self._stop_observer()
Developer: transifex, Project: hermes, Lines of code: 104, Source: client.py

Example 4: WorkerThread

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put_nowait [as alias]

#.........part of the code is omitted here.........
                    self._delete_count += 1
            except MemcachedError as error:
                if not self.moxi:
                    awareness.done()
                    try:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    except Exception:
                        # vbucket map is changing . sleep 5 seconds
                        time.sleep(5)
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
                    if isinstance(self.serverInfo, dict):
                        self.log.error(
                            "memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo["ip"])
                        )
                    else:
                        self.log.error(
                            "memcached error {0} {1} from {2}".format(error.status, error.msg, self.serverInfo.ip)
                        )
                if error.status == 134:
                    backoff_count += 1
                    if backoff_count < 5:
                        backoff_seconds = 15 * backoff_count
                    else:
                        backoff_seconds = 2 * backoff_count
                    self.log.info("received error # 134. backing off for {0} sec".format(backoff_seconds))
                    time.sleep(backoff_seconds)

                self._rejected_keys_count += 1
                self._rejected_keys.append({"key": key, "value": value})
                if len(self._rejected_keys) > self.ignore_how_many_errors:
                    break
            except Exception as ex:
                if not self.moxi:
                    awareness.done()
                    try:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    except Exception:
                        awareness = VBucketAwareMemcached(RestConnection(self.serverInfo), self.name)
                    self.log.info("now connected to {0} memcacheds".format(len(awareness.memcacheds)))
                if isinstance(self.serverInfo, dict):
                    self.log.error("error {0} from {1}".format(ex, self.serverInfo["ip"]))
                    import traceback

                    traceback.print_exc()
                else:
                    self.log.error("error {0} from {1}".format(ex, self.serverInfo.ip))
                self._rejected_keys_count += 1
                self._rejected_keys.append({"key": key, "value": value})
                if len(self._rejected_keys) > self.ignore_how_many_errors:
                    break

        # before closing the session, let's try sending those items again
        retry = 3
        while retry > 0 and self._rejected_keys_count > 0:
            rejected_after_retry = []
            self._rejected_keys_count = 0
            for item in self._rejected_keys:
                try:
                    if self.override_vBucketId >= 0:
                        client.vbucketId = self.override_vBucketId
                    if self.async_write:
                        client.send_set(item["key"], 0, 0, item["value"])
                    else:
                        client.set(item["key"], 0, 0, item["value"])
                    self._inserted_keys_count += 1
                except MemcachedError:
                    self._rejected_keys_count += 1
                    rejected_after_retry.append({"key": item["key"], "value": item["value"]})
                    if len(rejected_after_retry) > self.ignore_how_many_errors:
                        break
            self._rejected_keys = rejected_after_retry
            retry = -1
            # clean up the rest of the deleted keys
            if len(self._delete) > 0:
                #                self.log.info("deleting {0} keys".format(len(self._delete)))
                for key_del in self._delete:
                    client.delete(key_del)
                self._delete = []

            self.log.info("deleted {0} keys".format(self._delete_count))
            self.log.info("expiry {0} keys".format(self._expiry_count))
            #        client.close()
        awareness.done()
        if not self.write_only:
            self.queue.put_nowait("stop")
            self.reader.join()

    def _initialize_memcached(self):
        pass

    def _set(self):
        pass

    def _handle_error(self):
        pass
        # if error is memcached error oom related let's do a sleep

    def _time_to_stop(self):
        return self.aborted or len(self._rejected_keys) > self.ignore_how_many_errors
Developer: steveyen, Project: testrunner, Lines of code: 104, Source: data_helper.py


Note: The multiprocessing.queues.Queue.put_nowait examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or reusing the code; do not reproduce this article without permission.