

Python asyncio.PriorityQueue Method Code Examples

This article collects typical usage examples of the asyncio.PriorityQueue method in Python. If you are wondering what asyncio.PriorityQueue does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the asyncio module.


The following presents 15 code examples of the asyncio.PriorityQueue method, sorted by popularity by default.
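Before turning to the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the behaviour these snippets rely on: entries are typically (priority, payload) tuples, and get() always returns the entry with the lowest priority value first.

import asyncio

async def main():
    pq = asyncio.PriorityQueue()

    # Lower priority values are retrieved first.
    await pq.put((2, "second"))
    await pq.put((1, "first"))
    await pq.put((3, "third"))

    while not pq.empty():
        priority, payload = await pq.get()
        print(priority, payload)  # prints: 1 first, 2 second, 3 third
        pq.task_done()

asyncio.run(main())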

Example 1: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(
            self,
            response_command_type: Union[Type[CommandAPI[Any]], Sequence[Type[CommandAPI[Any]]]],
            sort_key: Callable[[PerformanceAPI], float] = _items_per_second) -> None:
        """
        :param sort_key: how should we sort the peers to get the fastest? low score means top-ranked
        """
        self._waiting_peers = PriorityQueue()

        if isinstance(response_command_type, type):
            self._response_command_type = (response_command_type,)
        elif isinstance(response_command_type, collections.abc.Sequence):
            self._response_command_type = tuple(response_command_type)
        else:
            raise TypeError(f"Unsupported value: {response_command_type}")

        self._peer_wrapper = SortableTask.orderable_by_func(self._get_peer_rank)
        self._sort_key = sort_key 
Developer: ethereum, Project: trinity, Lines: 20, Source: peers.py

Example 2: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, bot):
        self.bot = bot
        self.json = compat_load(JSON)

        # queue variables
        self.queue = asyncio.PriorityQueue(loop=bot.loop)
        self.queue_lock = asyncio.Lock(loop=bot.loop)
        self.pending = {}
        self.enqueued = set()

        try:
            self.analytics = CogAnalytics(self)
        except Exception as error:
            self.bot.logger.exception(error)
            self.analytics = None

        self.task = bot.loop.create_task(self.on_load()) 
Developer: calebj, Project: calebj-cogs, Lines: 19, Source: punish.py

Example 3: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(
        self,
        connections,
        dead_timeout=60,
        timeout_cutoff=5,
        selector_class=RoundRobinSelector,
        randomize_hosts=True,
        *,
        loop,
        **kwargs
    ):
        self._dead_timeout = dead_timeout
        self.timeout_cutoff = timeout_cutoff
        self.connection_opts = connections
        self.connections = [c for (c, _) in connections]
        self.orig_connections = set(self.connections)
        self.dead = asyncio.PriorityQueue(len(self.connections), loop=loop)
        self.dead_count = collections.Counter()

        self.loop = loop

        if randomize_hosts:
            random.shuffle(self.connections)

        self.selector = selector_class(dict(connections)) 
Developer: aio-libs, Project: aioelasticsearch, Lines: 27, Source: pool.py

Example 4: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, robots, resolver):
        self.robots = robots
        self.resolver = resolver

        self.q = asyncio.PriorityQueue()
        self.ridealong = {}
        self.awaiting_work = 0
        self.maxhostqps = None
        self.delta_t = None
        self.next_fetch = cachetools.ttl.TTLCache(10000, 10)  # 10 seconds good enough for QPS=0.1 and up
        self.frozen_until = cachetools.ttl.TTLCache(10000, 10)  # 10 seconds is longer than our typical delay
        self.maxhostqps = float(config.read('Crawl', 'MaxHostQPS'))
        self.delta_t = 1./self.maxhostqps
        self.initialize_budgets()

        _, prefetch_dns = fetcher.global_policies()
        self.use_ip_key = prefetch_dns
        memory.register_debug(self.memory) 
Developer: cocrawler, Project: cocrawler, Lines: 20, Source: scheduler.py

Example 5: using_queues

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
async def using_queues():
    q = asyncio.Queue()

    q.put_nowait('Hello')

    await q.get()

    await q.put('world')

    q.get_nowait()


    pq = asyncio.PriorityQueue()

    stack = asyncio.LifoQueue() 
Developer: PacktPublishing, Project: Daniel-Arbuckles-Mastering-Python, Lines: 17, Source: queue_usage.py

Example 6: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, max_q_size=0):
        # this queue accepts tuples of the form:
        #  (block_index, InvItem(ITEM_TYPE_BLOCK, block_hash), future)
        self.block_hash_priority_queue = asyncio.PriorityQueue(max_q_size) 
Developer: richardkiss, Project: pycoinnet, Lines: 6, Source: Blockfetcher.py

Example 7: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, connections, *, dead_timeout=60, timeout_cutoff=5,
                 selector_factory=RoundRobinSelector,
                 loop):
        self._dead_timeout = dead_timeout
        self._timeout_cutoff = timeout_cutoff
        self._selector = selector_factory()
        self._dead = asyncio.PriorityQueue(len(connections), loop=loop)
        self._dead_count = collections.Counter()
        self._connections = connections
        self._loop = loop 
Developer: aio-libs-abandoned, Project: aioes, Lines: 12, Source: pool.py

Example 8: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, bot):
        self.bot = bot
        self.events = dataIO.load_json(JSON)
        self.queue = asyncio.PriorityQueue(loop=self.bot.loop)
        self.queue_lock = asyncio.Lock()
        self.pending = {}
        self.pending_by_event = defaultdict(lambda: list())
        self._load_events()
        self.task = bot.loop.create_task(self.queue_manager()) 
Developer: calebj, Project: calebj-cogs, Lines: 11, Source: scheduler.py

Example 9: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, bot):
        self.bot = bot
        self.events = fileIO('data/scheduler/events.json', 'load')
        self.queue = asyncio.PriorityQueue(loop=self.bot.loop)
        self.queue_lock = asyncio.Lock()
        self.to_kill = {}
        self._load_events() 
Developer: tekulvw, Project: Squid-Plugins, Lines: 9, Source: scheduler.py

Example 10: test_order

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def test_order(self):
        q = asyncio.PriorityQueue(loop=self.loop)
        for i in [1, 3, 2]:
            q.put_nowait(i)

        items = [q.get_nowait() for _ in range(3)]
        self.assertEqual([1, 2, 3], items) 
Developer: Microvellum, Project: Fluid-Designer, Lines: 9, Source: test_queues.py
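Example 10 enqueues bare integers, which are directly comparable. A common pattern when the payload itself is not comparable (a generic sketch, not part of the Fluid-Designer test suite) is to enqueue (priority, sequence, payload) tuples, where a running counter breaks ties so payloads are never compared:

import asyncio
import itertools

async def demo_priority_tuples():
    q = asyncio.PriorityQueue()
    counter = itertools.count()  # tie-breaker: equal priorities never compare payloads
    for priority, payload in [(1, {"a": 1}), (1, {"b": 2}), (0, {"c": 3})]:
        q.put_nowait((priority, next(counter), payload))
    while not q.empty():
        priority, _, payload = q.get_nowait()
        print(priority, payload)  # 0 {'c': 3}, then 1 {'a': 1}, then 1 {'b': 2}

asyncio.run(demo_priority_tuples())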

Example 11: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, maxsize=0):
        super(PriorityQueue, self).__init__(maxsize) 
Developer: 01ly, Project: Amipy, Lines: 4, Source: queue.py

Example 12: run

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
async def run():
    queue = asyncio.PriorityQueue()
    consumer = asyncio.ensure_future(consume(queue))
    await produce(queue)
    await queue.join()
    consumer.cancel() 
Developer: dongweiming, Project: mp, Lines: 8, Source: prioqueue.py
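The produce and consume coroutines referenced by run() are not part of the snippet. A hypothetical pair that would make it runnable (the actual implementations in the mp project may differ) could look like this; asyncio.run(run()) would then drive the whole pipeline:

import asyncio
import random

async def produce(queue):
    # Enqueue a few (priority, payload) tuples; lower numbers are consumed first.
    for i in range(5):
        await queue.put((random.randint(0, 10), 'item-%d' % i))

async def consume(queue):
    # Drain entries until run() cancels this task after queue.join() returns.
    while True:
        priority, payload = await queue.get()
        print('consumed', priority, payload)
        queue.task_done()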

Example 13: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self):
        self.__locker = {}
        self.__queue = PriorityQueue() 
Developer: wynfred, Project: presso, Lines: 5, Source: eventqueue.py

Example 14: load

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def load(self, crawler, f):
        header = pickle.load(f)  # XXX check that this is a good header... log it
        self.ridealong = pickle.load(f)
        crawler._seeds = pickle.load(f)
        self.q = asyncio.PriorityQueue()
        count = pickle.load(f)
        for _ in range(0, count):
            work = pickle.load(f)
            self.q.put_nowait(work) 
Developer: cocrawler, Project: cocrawler, Lines: 11, Source: scheduler.py

Example 15: __init__

# Required import: import asyncio [as alias]
# Or: from asyncio import PriorityQueue [as alias]
def __init__(self, target_batch_time=10, max_batch_size=500, inv_item_future_q_maxsize=1000):

        self._is_closing = False
        self._inv_item_future_queue = asyncio.PriorityQueue(maxsize=inv_item_future_q_maxsize)

        async def batch_getdata_fetches(peer_batch_tuple, q):
            peer, desired_batch_size = peer_batch_tuple
            batch = []
            skipped = []
            logger.info("peer %s trying to build batch up to size %d", peer, desired_batch_size)
            while len(batch) == 0 or (
                    len(batch) < desired_batch_size and not self._inv_item_future_queue.empty()):
                item = await self._inv_item_future_queue.get()
                (priority, inv_item, f, peers_tried) = item
                if f.done():
                    continue
                if peer in peers_tried:
                    skipped.append(item)
                else:
                    batch.append(item)
            if len(batch) > 0:
                await q.put((peer, batch, desired_batch_size))
            for item in skipped:
                if not item[2].done():
                    await self._inv_item_future_queue.put(item)

        async def fetch_batch(peer_batch, q):
            loop = asyncio.get_event_loop()
            peer, batch, prior_max = peer_batch
            inv_items = [inv_item for (priority, inv_item, f, peers_tried) in batch]
            peer.send_msg("getdata", items=inv_items)
            start_time = loop.time()
            futures = [f for (priority, bh, f, peers_tried) in batch]
            await asyncio.wait(futures, timeout=target_batch_time)
            end_time = loop.time()
            batch_time = end_time - start_time
            logger.info("completed batch size of %d with time %f", len(inv_items), batch_time)
            completed_count = sum([1 for f in futures if f.done()])
            item_per_unit_time = completed_count / batch_time
            new_batch_size = min(prior_max * 4, int(target_batch_time * item_per_unit_time + 0.5))
            new_batch_size = min(max(1, new_batch_size), max_batch_size)
            logger.info("new batch size for %s is %d", peer, new_batch_size)
            for (priority, inv_item, f, peers_tried) in batch:
                if not f.done():
                    peers_tried.add(peer)
                    await self._inv_item_future_queue.put((priority, inv_item, f, peers_tried))
            await self._peer_batch_queue.put((peer, new_batch_size))

        self._peer_batch_queue = MappingQueue(
            dict(callback_f=batch_getdata_fetches),
            dict(callback_f=fetch_batch, input_q_maxsize=2),
        )

        self._inv_item_hash_to_future = dict() 
Developer: gdassori, Project: spruned, Lines: 56, Source: inv_batcher.py


Note: The asyncio.PriorityQueue method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.