This article collects typical usage examples of the gevent.queue.JoinableQueue.qsize method in Python. If you are wondering how to call JoinableQueue.qsize, how it behaves, or what real-world uses of it look like, the curated examples below may help. You can also explore further usage examples of the class this method belongs to, gevent.queue.JoinableQueue.
Five code examples of the JoinableQueue.qsize method are shown below, sorted by popularity by default.
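Before diving into the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) showing what JoinableQueue.qsize reports at each stage of a put/consume cycle:

from gevent.queue import JoinableQueue
import gevent

def worker(q):
    # Drain the queue, acknowledging each item so that q.join() can return.
    while True:
        item = q.get()
        try:
            print('processing', item)
        finally:
            q.task_done()

q = JoinableQueue()
for i in range(3):
    q.put(i)
print(q.qsize())   # 3: nothing has been consumed yet

gevent.spawn(worker, q)
q.join()           # blocks until every queued item has been task_done()'d
print(q.qsize())   # 0: the queue has been drained

Note that qsize() only counts items still sitting in the queue; an item that has been fetched with get() but not yet acknowledged with task_done() no longer shows up in qsize(). That is why the examples below pair qsize() with join() or with explicit bookkeeping.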
Example 1: test_service_failure
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import qsize [as alias]
def test_service_failure():
    "Service() should handle custom callback failures"
    # Given the following service
    class MyService(Service):
        def __init__(self, result_queue=None):
            super(MyService, self).__init__(
                callback=self.run,
                result_queue=result_queue,
            )

        def run(self, package, sender_data):
            raise ValueError("I don't want to do anything")

    queue = JoinableQueue()
    service = MyService(result_queue=queue)

    # When I queue a package to be processed by my service and start the
    # service with 1 concurrent worker
    service.queue('gherkin==0.1.0', 'main')
    service.consume()
    service.pool.join()  # Ensure we finish spawning the greenlet

    # Then I see that no package was processed
    queue.qsize().should.equal(0)

    # And that the list of failed packages was updated
    service.failed_queue[0][0].should.equal('gherkin==0.1.0')
    service.failed_queue[0][1].should.be.a(ValueError)
    service.failed_queue[0][1].message.should.equal("I don't want to do anything")
Example 2: test_main
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import qsize [as alias]
def test_main(self):
    queue = JoinableQueue()
    print(dir(queue))

    queue.put(1)
    queue.put(3)
    queue.put(2)
    queue.put(6)

    print(queue.qsize())
    print('1', queue.get(), queue.get())
Example 3: RequestBase
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import qsize [as alias]
class RequestBase(object):
    def __init__(self, url, parameter, HTTPClients, ClientConnectionPool, task=None):
        if task is not None:
            self.celeryTask = task
            self.celeryTaskId = task.request.id
        else:
            self.celeryTask = None

        self.parameter = parameter
        self.url = url
        self.numberHTTPClients = HTTPClients
        self.numberClientConnectionPool = ClientConnectionPool

        self.http = HTTPClient.from_url(URL(url), concurrency=self.numberClientConnectionPool)
        self.clientPool = gevent.pool.Pool(self.numberHTTPClients)

        self.workQueue = JoinableQueue()
        self.resultList = {}
        self.workQueueMax = 0
        self.workQueueDone = 0
        self.countRequests = 0
        self.status_codes = {}
        self.status_codes_count = {}
        self.meta = {}
        self.greenletList = {}

        self.initAdditionalStructures()

        self.progressMeta = None
        self.exitFlag = False
        self.pauseRequests = False

    def destroy(self):
        self.http.close()

    def initAdditionalStructures(self):
        pass

    def destroyAdditionstrucutres(self):
        pass

    def getProgress(self):
        return self.meta

    def updateProgress(self, state="PROGRESS"):
        '''Updates the status'''
        self.meta = {
            'state': state,
            'workQueueDone': self.workQueueDone,
            'workQueueMax': self.workQueueMax,
            'current': len(self.resultList),
            'workQueue': self.workQueue.qsize(),
            'requests': self.countRequests,
        }
        # Iterate over the status_codes dict and record each queue's size. This may
        # not be the best solution from a performance point of view.
        for code, queue in self.status_codes.items():
            self.status_codes_count[code] = queue.qsize()
        self.meta['status_codes'] = self.status_codes_count
        if self.celeryTask is not None:
            self.celeryTask.update_state(task_id=self.celeryTaskId, state=state, meta=self.meta)

    def worker(self, http, clientId):
        while not self.workQueue.empty() or self.exitFlag:
            try:
                code = self.makeRequest(http, self.getWorkQueueItem())
            finally:
                self.workQueue.task_done()

    def stop(self):
        self.exitFlag = True

    def buildRequestURL(self, workQueueItem):
        '''Function used to build the request URL from a workQueue item'''
        pass

    def handleRequestSuccess(self, workQueueItem, result):
        '''Required function, called after every successful request'''
        pass

    def handleRequestFailure(self, result):
        '''Function called after a failed request, for example on error code 404'''
        pass

    def makeRequest(self, http, workQueueItem):
        '''Makes the request and returns the response status code'''
        url_string = self.buildRequestURL(workQueueItem)
        self.countRequests += 1
        try:
            response = http.get(URL(url_string).request_uri)
            statusCode = response.status_code
            # Create a new queue if this status_code has not been seen yet, then add the item to it
            if str(statusCode) not in self.status_codes:
                self.status_codes[str(statusCode)] = JoinableQueue()
            self.status_codes[str(statusCode)].put(workQueueItem)
            try:
                self.handleRequestSuccess(workQueueItem, response)
            except SSLError as e:
                print(e)
            return statusCode
# ... the rest of this code is omitted ...
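The class above keeps one JoinableQueue per HTTP status code and reads qsize() from each of them when it builds its progress dictionary. The following stripped-down sketch (hypothetical names, with a stubbed-out request in place of geventhttpclient, purely to illustrate the bookkeeping) shows the same idea of grouping finished work items into per-status queues and reporting their sizes:

import random
import gevent
from gevent.pool import Pool
from gevent.queue import Empty, JoinableQueue

work_queue = JoinableQueue()
status_queues = {}          # one JoinableQueue per observed status code

def fake_request(item):
    # Stand-in for the real HTTP call; returns a made-up status code.
    gevent.sleep(0)
    return random.choice([200, 200, 404, 500])

def worker():
    while True:
        try:
            item = work_queue.get(block=False)
        except Empty:
            return
        try:
            code = str(fake_request(item))
            status_queues.setdefault(code, JoinableQueue()).put(item)
        finally:
            work_queue.task_done()

def progress():
    # Snapshot of remaining work and per-status counts, as in updateProgress().
    return {
        'workQueue': work_queue.qsize(),
        'status_codes': {code: q.qsize() for code, q in status_queues.items()},
    }

for i in range(20):
    work_queue.put(i)

pool = Pool(4)
for _ in range(4):
    pool.spawn(worker)

work_queue.join()
print(progress())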
Example 4: sleep
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import qsize [as alias]
            sleep(1)
        else:
            sleep(5)


if __name__ == '__main__':
    t_status = spawn_link_exception(status_thread)
    t_item_queue = spawn_link_exception(add_to_item_queue)
    for i in range(80):
        spawn_link_exception(run_find_item)
    #t_index_items = spawn_link_exception(index_items)
    for i in range(8):
        spawn_link_exception(run_solr_queue, i)
    #joinall([t_run_find_item, t_item_queue, t_index_items, t_solr])
    sleep(1)

    print('join item_queue thread')
    t_item_queue.join()
    print('item_queue thread complete')
    #print('join item_and_host_queue:', item_and_host_queue.qsize())
    #item_and_host_queue.join()
    #print('item_and_host_queue complete')

    for host, host_queue in host_queues.items():
        qsize = host_queue.qsize()
        print('host:', host, qsize)
        host_queue.join()

    print('join solr_queue:', solr_queue.qsize())
    solr_queue.join()
    print('solr_queue complete')
Example 5: BaseLogger
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import qsize [as alias]
class BaseLogger(Collected, Jobber):
    """\
    This class implements one particular way to log things.
    """
    storage = Loggers.storage
    q = None
    job = None
    ready = False
    _in_flush = False

    def __init__(self, level):
        self.level = level

        global logger_nr
        logger_nr += 1

        if not hasattr(self, "name") or self.name is None:
            self.name = Name(self.__class__.__name__, "x" + str(logger_nr))

        super(BaseLogger, self).__init__()
        self._init()

    def _init(self):
        """Fork off the writer thread.
           Override this to do nothing if you don't have one."""
        self.q = JoinableQueue(100)
        self.start_job("job", self._writer)
        self.job.link(self.delete)
        if self.ready is False:
            self.ready = True
        else:
            self.stop_job("job")  # concurrency issues?

    def _writer(self):
        errs = 0
        for r in self.q:
            try:
                if r is FlushMe:
                    self._flush()
                else:
                    self._log(*r)
            except Exception as ex:
                errs += 1
                fix_exception(ex)
                from moat.run import process_failure
                process_failure(ex)
                if errs > 10:
                    reraise(ex)
            else:
                if errs:
                    errs -= 1
            finally:
                self.q.task_done()
        self.q.task_done()  # for the StopIter

    # Collection stuff
    def list(self):
        yield super(BaseLogger, self)
        yield ("Type", self.__class__.__name__)
        yield ("Level", LogNames[self.level])
        yield ("Queue", self.q.qsize())

    def info(self):
        return LogNames[self.level] + ": " + self.__class__.__name__

    def delete(self, ctx=None):
        if self.ready:
            self.ready = None
            super(BaseLogger, self).delete(ctx)
        try:
            if self.q:
                self.q.put(StopIteration, block=False)
        except Full:
            ## panic?
            pass
        if self.job is not None:
            self.job.join(timeout=1)
            self.stop_job("job")

    def _wlog(self, *a):
        try:
            self.q.put(a, block=False)
        except Full:
            ## panic?
            self.delete()

    def _log(self, level, *a):
        a = " ".join((x if isinstance(x, six.string_types) else str(x) for x in a))
        self._slog(level, a)

    def _slog(self, a):
        raise NotImplementedError("You need to override %s._log or ._slog" % (self.__class__.__name__,))

    def _flush(self):
        pass

    def log(self, level, *a):
        if LogLevels[level] >= self.level:
            self._wlog(level, *a)
            if TESTING and not (hasattr(a[0], "startswith") and a[0].startswith("TEST")):
# ... the rest of this code is omitted ...
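Example 5 bounds its queue at 100 entries and uses non-blocking puts so that a stalled writer cannot block the rest of the program; qsize() is only read for introspection in list(). A minimal sketch of that bounded, drop-on-overflow pattern (hypothetical names, independent of the moat code base) could look like this:

import gevent
from gevent.queue import Full, JoinableQueue

log_queue = JoinableQueue(5)     # small bound so the overflow path is easy to trigger
STOP = object()                  # sentinel that tells the writer to exit

def writer():
    while True:
        record = log_queue.get()
        try:
            if record is STOP:
                return
            print('LOG:', record)
        finally:
            log_queue.task_done()

def enqueue(record):
    try:
        log_queue.put(record, block=False)
    except Full:
        # The writer is falling behind; drop the record instead of blocking the caller.
        pass

job = gevent.spawn(writer)
for i in range(20):
    enqueue('message %d' % i)
print('queued:', log_queue.qsize())   # introspection, as in BaseLogger.list()

log_queue.put(STOP)
log_queue.join()
job.join()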