This article collects typical usage examples of the JoinableQueue.task_done method from Python's gevent.queue module. If you are wondering what JoinableQueue.task_done does, or how to use it in practice, the curated examples below may help. You can also explore other uses of its defining class, gevent.queue.JoinableQueue.
The 12 code examples of JoinableQueue.task_done below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
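Before the excerpts, here is a minimal, self-contained sketch of the task_done/join protocol (written for this article, not taken from any of the projects below): a consumer calls task_done() once for every item it took from the queue, and join() blocks until every queued item has been marked done.

import gevent
from gevent.queue import JoinableQueue

def consumer(q):
    while True:
        item = q.get()        # blocks until an item is available
        try:
            print('processing %r' % item)
        finally:
            q.task_done()     # mark the item done even if processing failed

q = JoinableQueue()
gevent.spawn(consumer, q)
for i in range(5):
    q.put(i)
q.join()                      # returns once task_done() has been called 5 times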
Example 1: Speaker
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class Speaker(gevent.Greenlet):
    RATE = 44100

    def __init__(self, rcv):
        gevent.Greenlet.__init__(self)
        self.rcv = rcv
        PA = pyaudio.PyAudio()
        self.pa = PA.open(
            format=pyaudio.paInt16,
            channels=1,
            rate=self.RATE,
            output=True
        )
        self.queue = JoinableQueue()

    def _run(self):
        print 'spk_on'
        while True:
            try:
                buf = self.rcv.queue.get()
            except gevent.queue.Empty:
                buf = '\0'
            ## print '.',
            self.pa.write(buf)
            time.sleep(0.0001)
            self.queue.task_done()
        self.pa.close()
Example 2: test_api
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
def test_api(self):
    queue = JoinableQueue()
    task_group = self.api.search('terminator', queue)
    while True:
        finished = all(
            [greenlet.ready() for greenlet in task_group.greenlets]
        )
        try:
            item = queue.get(timeout=1.0)
        except Empty:
            if finished:
                log.info('queue is empty and all jobs are done, quitting')
                break
            log.info(
                'queue was empty and jobs are still running, retrying'
            )
            continue
        try:
            log.info('%r', item)
        finally:
            queue.task_done()
    task_group.join()
    queue.join()
    log.info('joined everything')
Example 3: GQueue
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class GQueue(object):
    def __init__(self):
        self.__QUEUE = JoinableQueue()

    def job(self, func):
        @functools.wraps(func)
        def f(*args, **kwargs):
            self.__QUEUE.put([func, args, kwargs])
        return f

    def join(self):
        self.__QUEUE.join()

    def work(self):
        while True:
            func, args, kwargs = self.__QUEUE.get()
            try:
                func(*args, **kwargs)
            finally:
                self.__QUEUE.task_done()

    def run_worker(self, num=1):
        for i in range(num):
            gevent.spawn(self.work)
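A hypothetical usage sketch for the GQueue class above (the greet function and its arguments are invented for illustration): decorating a function turns each call into a queued job, run_worker() spawns consumer greenlets, and join() waits for the queue to drain.

q = GQueue()

@q.job
def greet(name):
    print('hello, %s' % name)

q.run_worker(num=2)   # spawn two worker greenlets
greet('alice')        # enqueues the call instead of running it inline
greet('bob')
q.join()              # blocks until both queued calls have finished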
Example 4: on_search
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
def on_search(self, query):
    log.debug('search for %r', query)
    queue = JoinableQueue()
    task_group = g.api.search(query, queue)
    while True:
        finished = all(
            [t.ready() for t in task_group]
        )
        try:
            item = queue.get(timeout=1.0)
        except Empty:
            if finished:
                break
            continue
        try:
            self.emit('result', item._asdict())
        finally:
            queue.task_done()
    queue.join()
    task_group.join()
    self.emit('done', query)
Example 5: _run
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
def _run(self):
    paths = glob.glob(self.path)
    while not paths:
        gevent.sleep(0.01)
        paths = glob.glob(self.path)
    q = JoinableQueue()
    self.logger.debug('Tailing %s' % ', '.join(paths))
    self.tails = [Tail(p, q, self.statedir) for p in paths]
    while True:
        data = q.get()
        if data:
            if data.endswith('\n'):
                data = data[0:-1]
            self.logger.debug('Received: %r' % data)
            self.output.put(Event(data=data))
        q.task_done()
Example 6: ScoringService
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class ScoringService(Service):
    """A service that assigns a score to submission results.

    A submission result is ready to be scored when its compilation is
    unsuccessful (in this case, no evaluation will be performed) or
    after it has been evaluated. The goal of scoring is to use the
    evaluations to determine score, score_details, public_score,
    public_score_details and ranking_score_details (all non-null).
    Scoring is done by the compute_score method of the ScoreType
    defined by the dataset of the result.

    ScoringService keeps a queue of (submission_id, dataset_id) pairs
    identifying submission results to score. A greenlet is spawned to
    consume this queue, one item at a time. The queue is filled by the
    new_evaluation and the invalidate_submissions RPC methods, and by a
    sweeper greenlet, whose duty is to regularly check all submissions
    in the database and put the unscored ones in the queue (this check
    can also be forced by the search_jobs_not_done RPC method).

    """

    # How often we look for submission results not scored.
    SWEEPER_TIMEOUT = 347.0

    def __init__(self, shard):
        """Initialize the ScoringService."""
        Service.__init__(self, shard)

        # Set up communication with ProxyService.
        self.proxy_service = self.connect_to(ServiceCoord("ProxyService", 0))

        # Set up and spawn the scorer.
        # TODO Link to greenlet: when it dies, log CRITICAL and exit.
        self._scorer_queue = JoinableQueue()
        gevent.spawn(self._scorer_loop)

        # Set up and spawn the sweeper.
        # TODO Link to greenlet: when it dies, log CRITICAL and exit.
        self._sweeper_start = None
        self._sweeper_event = Event()
        gevent.spawn(self._sweeper_loop)

    def _scorer_loop(self):
        """Monitor the queue, scoring its top element.

        This is an infinite loop that, at each iteration, gets an item
        from the queue (blocking until there is one, if the queue is
        empty) and scores it. Any error during the scoring is sent to
        the logger and then suppressed, because the loop must go on.

        """
        while True:
            submission_id, dataset_id = self._scorer_queue.get()
            try:
                self._score(submission_id, dataset_id)
            except Exception:
                logger.error("Unexpected error when scoring submission %d on "
                             "dataset %d.", submission_id, dataset_id,
                             exc_info=True)
            finally:
                self._scorer_queue.task_done()

    def _score(self, submission_id, dataset_id):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        submission_id (int): the id of the submission that has to be
            scored.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)
            # It means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError("Submission result %d(%d) was not found." %
                                 (submission_id, dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
#.........rest of the code omitted.........
Example 7: not
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
#.........code omitted at the start of this example.........
                request['view_url']
            )
            if not (result is None):
                url = request['result_url']
                data = urllib.urlencode(json.loads(result))
                req = urllib2.Request(url, data)
                f = urllib2.urlopen(req)
                response = f.read()
                f.close()
        except Exception, e:
            log.error("Exception! - error returning results")
            log.error(e)
        finally:
            pqueue.task_done()

#//////////////////////////////////////////////////////////
# MAIN FUNCTION
#//////////////////////////////////////////////////////////
def main():
    global datadb, resourcedb
    global um, pm, im

    #-------------------------------
    # setup logging
    #-------------------------------
    log.setLevel(logging.DEBUG)
Example 8: __init__
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class Migrator:
    def __init__(self, scheme, create_devices=True,
                 write_data=True,
                 start_date="2000-01-01T00:00:00Z",
                 end_date="2014-12-31T00:00:00Z",
                 pool_size=3):
        self.scheme = scheme
        self.create_devices = create_devices
        self.should_write_data = write_data
        self.start_date = start_date
        self.end_date = end_date
        self.tdb = TDBClient(scheme.db_key, scheme.db_key,
                             scheme.db_secret,
                             base_url=scheme.db_baseurl)
        iq_endpoint = HTTPEndpoint(scheme.iq_baseurl,
                                   scheme.iq_key,
                                   scheme.iq_secret)
        self.tiq = TIQClient(iq_endpoint)
        self.queue = JoinableQueue()
        self.lock = Lock()
        self.dp_count = 0
        self.req_count = 0
        self.dp_reset = time.time()
        for i in range(pool_size):
            gevent.spawn(self.worker)

    def worker(self):
        while True:
            series = self.queue.get()
            try:
                self.migrate_series(series)
            finally:
                self.queue.task_done()

    def migrate_all_series(self, start_key="", limit=None):
        start_time = time.time()
        (keys, tags, attrs) = self.scheme.identity_series_filter()
        series_set = self.tdb.list_series(keys, tags, attrs)

        # Keep our own state of whether we passed the resume point, so we don't
        # need to assume client and server sort strings the same.
        found_first_series = False

        series_count = 0
        for series in series_set:
            if not found_first_series and series.key < start_key:
                continue
            else:
                found_first_series = True

            if limit and series_count >= limit:
                print("Reached limit of %d devices, stopping." % (limit))
                break

            if self.scheme.identity_series_client_filter(series):
                # If the series looks like an identity series,
                # queue it to be processed by the threadpool
                self.queue.put(series)
                series_count += 1

        self.queue.join()
        end_time = time.time()
        print("Exporting {} devices took {} seconds".format(series_count, end_time - start_time))

    def migrate_series(self, series):
        print("  Beginning to migrate series: %s" % (series.key))
        error = False
        try:
            if self.create_devices:
                error = self.create_device(series)
            if self.should_write_data and not error:
                error = self.write_data(series)
        except Exception, e:
            logging.exception(e)
            error = True

        if not error:
            print("COMPLETED migrating for series %s" % (series.key))
        else:
            print("ERROR migrating series %s" % (series.key))
Example 9: Importer
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class Importer(object):
    def __init__(self, creds, pool_size=POOL_SIZE):
        self.client = get_session(creds['host'],
                                  creds['key'],
                                  creds['secret'])
        self.queue = JoinableQueue(maxsize=POOL_SIZE * 2)
        for i in range(pool_size):
            gevent.spawn(self.worker)

    def worker(self):
        while True:
            job = self.queue.get()
            typ = job.get('type')
            try:
                if typ == 'device':
                    self._process_device(job['data'])
                elif typ == 'datapoints':
                    self._process_datapoints(job['data'])
            finally:
                self.queue.task_done()

    def write_devices(self, devices):
        for device in devices:
            self.queue.put({'type': 'device', 'data': device})
        self.queue.join()

    def write_datapoints_from_file(self, infile):
        points = {}
        lineno = 0
        for line in infile:
            lineno += 1
            (device, sensor, ts, val) = line.split('\t')
            pts = points.setdefault(device, {}).setdefault(sensor, [])
            pts.append({'t': ts, 'v': float(val)})

            if lineno % 1000 == 0:
                self.queue.put({'type': 'datapoints', 'data': points})
                points = {}

        if points:
            self.queue.put({'type': 'datapoints', 'data': points})

        self.queue.join()

    def _process_device(self, device, retries=5):
        res = self.client.create_device(device)
        if res.successful != tempoiq.response.SUCCESS:
            if 'A device with that key already exists' in res.body:
                print("Skipping creating existing device {}"
                      .format(device['key']))
                return

            if retries > 0:
                print("Retrying device create {}, error {}"
                      .format(device['key'], res.body))
                self._process_device(device, retries - 1)
            else:
                print("Retries exceeded; couldn't create device {}"
                      .format(device['key']))

    def _process_datapoints(self, write_request, retries=5):
        try:
            res = self.client.write(write_request)
        except Exception, e:
            print("ERROR with request: --->")
            print(json.dumps(write_request, default=WriteEncoder().default))
            raise e

        if res.successful != tempoiq.response.SUCCESS:
            if retries > 0:
                print("Retrying write, error was: {}".format(res.body))
                return self._process_datapoints(write_request, retries - 1)
            else:
                print("Retries exceeded; lost data!")
                print(json.dumps(write_request, default=WriteEncoder().default))
                return True

        return False
Example 10: RequestBase
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class RequestBase(object):
    def __init__(self, url, parameter, HTTPClients, ClientConnectionPool, task=None):
        if task is not None:
            self.celeryTask = task
            self.celeryTaskId = task.request.id
        else:
            self.celeryTask = None

        self.parameter = parameter
        self.url = url
        self.numberHTTPClients = HTTPClients
        self.numberClientConnectionPool = ClientConnectionPool

        self.http = HTTPClient.from_url(URL(url),
                                        concurrency=self.numberClientConnectionPool)
        self.clientPool = gevent.pool.Pool(self.numberHTTPClients)

        self.workQueue = JoinableQueue()
        self.resultList = {}
        self.workQueueMax = 0
        self.workQueueDone = 0
        self.countRequests = 0
        self.status_codes = {}
        self.status_codes_count = {}
        self.meta = {}
        self.greenletList = {}
        self.initAdditionalStructures()
        self.progressMeta = None
        self.exitFlag = False
        self.pauseRequests = False

    def destroy(self):
        self.http.close()

    def initAdditionalStructures(self):
        pass

    def destroyAdditionstrucutres(self):
        pass

    def getProgress(self):
        return self.meta

    def updateProgress(self, state="PROGRESS"):
        '''Updates the status'''
        self.meta = {'state': state,
                     'workQueueDone': self.workQueueDone,
                     'workQueueMax': self.workQueueMax,
                     'current': len(self.resultList),
                     'workQueue': self.workQueue.qsize(),
                     'requests': self.countRequests}
        # Iterate over the status_codes dict and save each queue's size;
        # may not be the best solution performance-wise.
        for code, queue in self.status_codes.iteritems():
            self.status_codes_count[code] = queue.qsize()
        self.meta['status_codes'] = self.status_codes_count

        if self.celeryTask is not None:
            self.celeryTask.update_state(task_id=self.celeryTaskId,
                                         state=state, meta=self.meta)

    def worker(self, http, clientId):
        while not self.workQueue.empty() or self.exitFlag:
            try:
                code = self.makeRequest(http, self.getWorkQueueItem())
            finally:
                self.workQueue.task_done()

    def stop(self):
        self.exitFlag = True

    def buildRequestURL(self, workQueueItem):
        '''Function used to build the request URL from a workingQueue item'''
        pass

    def handleRequestSuccess(self, workQueueItem, result):
        '''Required function, called after every successful request'''
        pass

    def handleRequestFailure(self, result):
        '''Function called after a failed request, e.g. error code 404'''
        pass

    def makeRequest(self, http, workQueueItem):
        '''Makes the request'''
        url_string = self.buildRequestURL(workQueueItem)
        self.countRequests += 1
        try:
            response = http.get(URL(url_string).request_uri)
            statusCode = response.status_code

            # Create a new queue if this response status_code has not been
            # seen yet, and add the item to that queue.
            if str(statusCode) not in self.status_codes:
                self.status_codes[str(statusCode)] = JoinableQueue()
            self.status_codes[str(statusCode)].put(workQueueItem)

            try:
                self.handleRequestSuccess(workQueueItem, response)
            except SSLError, e:
                print e
            return statusCode
#.........rest of the code omitted.........
Example 11: HttpScanner
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
#.........code omitted at the start of this example.........
        # Auth
        if self.args.auth is not None:
            items = self.args.auth.split(':')
            self.session.auth = (items[0], items[1])

        # Cookies
        self.cookies = {}
        if self.args.cookies is not None:
            self.cookies = Cookies.from_request(self.args.cookies)

        # Cookies from file
        if self.args.load_cookies is not None:
            if not path.exists(self.args.load_cookies) or not path.isfile(self.args.load_cookies):
                self.output.print_and_log('Could not find cookie file: %s' % self.args.load_cookies, logging.ERROR)
                exit(-1)

            self.cookies = MozillaCookieJar(self.args.load_cookies)
            self.cookies.load()
            self.session.cookies = self.cookies

        # User-Agent
        self.ua = UserAgent() if self.args.random_agent else None

    def worker(self, worker_id):
        self.output.write_log('Worker %i started.' % worker_id)
        while not self.hosts_queue.empty():
            host = self.hosts_queue.get()
            try:
                self.scan_host(worker_id, host)
            finally:
                self.output.write_log('Worker %i finished.' % worker_id)
                self.hosts_queue.task_done()

    def _head_available(self, host):
        """
        Determine if HEAD requests are allowed
        :param host:
        :return:
        """
        # Try an OPTIONS request first.
        try:
            response = self.session.options(host, headers=self._fill_headers())
            o = response.headers['allow'] if 'allow' in response.headers else None
            if o is not None and o.find('HEAD') != -1:
                return True
        except:
            # TODO: fix
            pass

        try:
            return False if self.session.head(host, headers=self._fill_headers()).status_code == 405 else True
        except:
            # TODO: fix
            return False

    def scan_host(self, worker_id, host):
        # Check if resolvable
        ip = helper.url_to_ip(host)
        if ip is None:
            self.output.write_log('Could not resolve %s Skipping...' % host, logging.WARNING)
            self.output.urls_scanned += len(self.urls)
            return

        # Check for HEAD
#.........rest of the code omitted.........
Example 12: BaseLogger
# Required import: from gevent.queue import JoinableQueue [as alias]
# Or: from gevent.queue.JoinableQueue import task_done [as alias]
class BaseLogger(Collected, Jobber):
    """\
    This class implements one particular way to log things.
    """
    storage = Loggers.storage
    q = None
    job = None
    ready = False
    _in_flush = False

    def __init__(self, level):
        self.level = level

        global logger_nr
        logger_nr += 1

        if not hasattr(self, "name") or self.name is None:
            self.name = Name(self.__class__.__name__, "x" + str(logger_nr))

        super(BaseLogger, self).__init__()
        self._init()

    def _init(self):
        """Fork off the writer thread.
        Override this to do nothing if you don't have one."""

        self.q = JoinableQueue(100)
        self.start_job("job", self._writer)
        self.job.link(self.delete)
        if self.ready is False:
            self.ready = True
        else:
            self.stop_job("job")  # concurrency issues?

    def _writer(self):
        errs = 0
        for r in self.q:
            try:
                if r is FlushMe:
                    self._flush()
                else:
                    self._log(*r)
            except Exception as ex:
                errs += 1
                fix_exception(ex)
                from moat.run import process_failure
                process_failure(ex)
                if errs > 10:
                    reraise(ex)
            else:
                if errs:
                    errs -= 1
            finally:
                self.q.task_done()
        self.q.task_done()  # for the StopIter

    # Collection stuff
    def list(self):
        yield super(BaseLogger, self)
        yield ("Type", self.__class__.__name__)
        yield ("Level", LogNames[self.level])
        yield ("Queue", self.q.qsize())

    def info(self):
        return LogNames[self.level] + ": " + self.__class__.__name__

    def delete(self, ctx=None):
        if self.ready:
            self.ready = None
            super(BaseLogger, self).delete(ctx)

        try:
            if self.q:
                self.q.put(StopIteration, block=False)
        except Full:
            ## panic?
            pass
        if self.job is not None:
            self.job.join(timeout=1)
            self.stop_job("job")

    def _wlog(self, *a):
        try:
            self.q.put(a, block=False)
        except Full:
            ## panic?
            self.delete()

    def _log(self, level, *a):
        a = " ".join(x if isinstance(x, six.string_types) else str(x) for x in a)
        self._slog(level, a)

    def _slog(self, level, a):
        raise NotImplementedError("You need to override %s._log or ._slog" % (self.__class__.__name__,))

    def _flush(self):
        pass

    def log(self, level, *a):
        if LogLevels[level] >= self.level:
            self._wlog(level, *a)
            if TESTING and not (hasattr(a[0], "startswith") and a[0].startswith("TEST")):
#.........rest of the code omitted.........