本文整理汇总了Python中queue.Queue.append方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.append方法的具体用法?Python Queue.append怎么用?Python Queue.append使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类queue.Queue
的用法示例。
在下文中一共展示了Queue.append方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import append [as 别名]
class CallCenter:
    """Routes incoming calls to respondents, escalating to managers and
    directors when lower levels are busy or unable to help.

    NOTE(review): `Queue` here is a project-local queue supporting
    `enqueue`/`append`/`pop`/`len` — not stdlib ``queue.Queue`` — confirm
    against the defining module.
    """

    def __init__(self):
        self.active_calls = []
        self.waiting_calls = Queue()     # calls waiting for a free respondent
        self.respondents = []
        self.free_respondents = Queue()  # respondents ready to take a call
        self.managers = []
        self.directors = []

    def dispatch_call(self, call):
        """Dispatch a new call, or queue it when no respondent is free."""
        if len(self.free_respondents) > 0:
            self._dispatch_call(call)
        else:
            # All respondents are busy; the call waits its turn.
            self.waiting_calls.enqueue(call)

    def escalate(self, call):
        """Move a call one employee level up, e.g. because the current
        handler is busy or not equipped to deal with it."""
        handler = call.employee
        target = handler.boss
        if not target.free:
            target = target.boss  # simplification: assume director is free
        call.employee = target
        target.free = False
        handler.free = True
        if handler.role == Role.respondent:
            self.free_respondents.append(handler)

    def call_end_receiver(self, call):
        """Signal handler: a call has ended."""
        self.active_calls.remove(call)
        call.employee.free = True

    def employee_free_receiver(self, employee):
        """Signal handler: an employee has become free; hand them the next
        waiting call, if any."""
        self.free_respondents.append(employee)
        pending = self.waiting_calls.pop()
        if pending:
            self._dispatch_call(pending)

    def _dispatch_call(self, call):
        """Hand a call to a free respondent and mark it active."""
        if call.employee:
            return  # the call is already dispatched
        respondent = self.free_respondents.pop()
        call.employee = respondent
        respondent.free = False
        call.start()
        self.active_calls.append(call)
示例2: Crawler
# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import append [as 别名]
class Crawler(object):
"""A web crawler that processes links in a given website, recursively
following all links on that site. This crawler supports parallel execution.
Crawling is done by calling crawl with a page parser that queues new tasks
in this Crawler."""
def __init__(self, site, timeout, parallel=False):
    """Set up crawler state and queue the root URL of *site*.

    When *parallel* is true, a thread-safe ``queue.Queue`` and a lock
    protect shared state; otherwise a plain list serves as the task queue.
    """
    self.site = site
    self.timeout = timeout
    self.parallel = parallel
    self.queued = set()  # URLs that have already been seen
    self.url_count = 0
    if parallel:
        # Synchronize access both to the seen-set and the task queue
        self.queued_lock = Lock()
        self.queue = Queue()
    else:
        self.queue = []
    self.queue_url(site, site, None)
def put_task(self, task):
"""Queue the given task in this Crawler."""
if self.parallel:
self.queue.put(task)
else:
self.queue.append(task)
def get_task(self):
"""Retrieve a task from this Crawler. The caller should first check that
tasks remain."""
if self.parallel:
return self.queue.get()
else:
return self.queue.pop()
def task_done(self):
"""Inform the Crawler that a task has completed. This should be done
every time a task is finished."""
if self.parallel:
self.queue.task_done()
def all_done(self):
"""Check whether or not all tasks have completed."""
if self.parallel:
# No synchronization needed; unfinished_tasks will never hit 0
# unless everything is done
return self.queue.unfinished_tasks == 0
else:
return len(self.queue) == 0
def unsynchronized_already_seen(self, url):
"""Check if a URL has already been seen, adding it to the set of seen
URLs if not already there. Access to the set should be synchronized by
the caller if necessary."""
if not url or url in self.queued:
return True
self.queued.add(url)
self.url_count += 1
return False
def already_seen(self, url):
"""Check if the given URL has already been seen. Locks access to the set
of seen URLs if crawling is being done in parallel."""
if self.parallel:
with self.queued_lock: # lock access to set
return self.unsynchronized_already_seen(url)
else:
return self.unsynchronized_already_seen(url)
def queue_url(self, url, base, parent):
    """Queue the given URL for reading, if it hasn't been seen before."""
    url = make_url(url, base)  # construct and/or simplify the URL
    if self.already_seen(url):
        return
    # Only read pages on this site that look like HTML.
    read = url.startswith(self.site)
    filename = url[url.rindex('/') + 1:]
    dot = filename.rfind('.')
    if dot >= 0 and filename[dot + 1:] not in ('html', 'htm'):
        read = False
    # Safely queue a new task to process the URL
    self.put_task((url, parent, read))
def handle_url(self, url_info, parser):
"""Process the URL specified by url_info with the given parser. Messages
produced by this method are intentionally unsynchronized."""
url, parent, read = url_info
print('handling:', url)
# Request, but don't read the page
try:
opened = urlopen(url, timeout=self.timeout)
except (HTTPError, URLError, socket.timeout) as e:
print('bad link in {0}: {1}'.format(parent, url))
print('error:', e)
return
#.........这里部分代码省略.........