本文整理匯總了Python中gevent.queue.JoinableQueue.put方法的典型用法代碼示例。如果您正苦於以下問題:Python JoinableQueue.put方法的具體用法?Python JoinableQueue.put怎麽用?Python JoinableQueue.put使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類gevent.queue.JoinableQueue
的用法示例。
在下文中一共展示了JoinableQueue.put方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: handle
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def handle():
    """Rebuild the core_ratequery table using a pool of gevent workers.

    Clears the table, then enumerates every combination of demographic id
    (each dimension padded with None for the "all" aggregate) and survey
    cycle, feeding the combinations to 50 worker greenlets via a
    JoinableQueue.
    """
    connection = create_postgresql_connection()

    # Wipe previously computed rates inside one explicit transaction.
    cursor = connection.cursor()
    cursor.execute("BEGIN;")
    cursor.execute("DELETE FROM core_ratequery;")
    cursor.execute("COMMIT;")
    cursor.close()

    queue = JoinableQueue()
    event = Event()  # set after the queue drains, telling workers to exit

    # Python 2: .values() returns a list, so `+ [None]` appends the
    # aggregate bucket for each dimension.
    age_ids = age_map(connection).values() + [None]
    sex_ids = sex_map(connection).values() + [None]
    education_ids = education_map(connection).values() + [None]
    province_ids = province_map(connection).values() + [None]

    cursor = connection.cursor()
    cursor.execute("SELECT DISTINCT cycle FROM core_microdata;");
    cycles = [row[0] for row in cursor]
    cursor.close()

    greenlets = []
    for i in range(50):
        gv = gevent.spawn(worker, queue, event)
        greenlets.append(gv)

    # Cartesian product of every dimension (including the None buckets).
    combs = itertools.product(age_ids, sex_ids, province_ids, education_ids, cycles)
    for c in combs:
        queue.put(c)

    queue.join()   # block until every combination is task_done()
    event.set()    # signal workers that no more work is coming
    gevent.joinall(greenlets)
示例2: GQueue
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class GQueue(object):
    """A minimal gevent-backed job queue.

    Functions wrapped with :meth:`job` enqueue their invocations instead
    of running immediately; greenlets started via :meth:`run_worker`
    drain the queue and execute them.
    """

    def __init__(self):
        self.__QUEUE = JoinableQueue()

    def job(self, func):
        """Decorator: calling the wrapped *func* enqueues the call."""
        @functools.wraps(func)
        def f(*args, **kwargs):
            self.__QUEUE.put([func, args, kwargs])
        return f

    def join(self):
        """Block until every queued job has been marked done."""
        self.__QUEUE.join()

    def work(self):
        """Worker loop: execute queued jobs forever."""
        while True:
            task, positional, keywords = self.__QUEUE.get()
            try:
                task(*positional, **keywords)
            finally:
                # Mark done even on failure so join() cannot deadlock.
                self.__QUEUE.task_done()

    def run_worker(self, num=1):
        """Spawn *num* greenlets running :meth:`work`."""
        for _ in range(num):
            gevent.spawn(self.work)
示例3: test_main
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def test_main(self):
    """Smoke-test JoinableQueue: enqueue four items, read two back."""
    queue = JoinableQueue()
    print dir(queue)
    queue.put(1)
    queue.put(3)
    queue.put(2)
    queue.put(6)
    print queue.qsize()  # expect 4 queued items
    print '1', queue.get(), queue.get()  # FIFO order: 1 then 3
示例4: Receiver
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class Receiver(gevent.Greenlet):
    """Greenlet that pulls ZeroMQ frames into an internal JoinableQueue."""

    PORT = 20000   # port of the upstream PUSH socket on localhost
    CHUNK = 512    # NOTE(review): unused in the visible code — confirm purpose

    def __init__(self):
        gevent.Greenlet.__init__(self)
        self.queue = JoinableQueue()  # received frames, consumed elsewhere

    def _run(self):
        # Connect to the local PUSH endpoint and forward frames forever.
        context = zmq.Context()
        receiver = context.socket(zmq.PULL)
        receiver.connect("tcp://localhost:%s" % self.PORT)
        print 'rcv_on'
        while True:
            frame = receiver.recv()
            sys.stdout.write('.')  # one dot per received frame
            sys.stdout.flush()
            self.queue.put(frame)
            time.sleep(0.0001)  # brief sleep so other greenlets get scheduled
示例5: spider
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def spider(start_url, max_depth=1, no_of_workers=10, page_fn=check_page_for_profanities):
    """Concurrently spider the web starting from *start_url*.

    Each fetched page is parsed and handed to *page_fn* (a function taking
    BeautifulSoup-parsed html and a url). Crawling stops *max_depth* links
    away from the start page, and *no_of_workers* greenlets service the
    shared job queue until it empties.
    """
    visited = {start_url}
    pending = JoinableQueue()
    pending.put((start_url, max_depth))
    for _ in range(no_of_workers):
        gevent.spawn(job_worker, pending, visited, page_fn)
    pending.join()
示例6: handle
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def handle():
    """Feed CSV microdata lines to 90 worker greenlets via a queue.

    Expected line format:
    ciclo edad sexo nforma prov aoi factorel
    The CSV path is taken from the first command-line argument.
    """
    csv_path = sys.argv[1]
    tasks = JoinableQueue()
    shutdown = Event()
    pool = [gevent.spawn(worker, tasks, shutdown) for _ in range(90)]
    with io.open(csv_path, 'r') as f:
        for line in f:
            tasks.put(line)
    tasks.join()     # wait until every line has been processed
    shutdown.set()   # tell workers no more lines are coming
    gevent.joinall(pool)
示例7: start
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def start(self):
    """Scan every host of the configured IP network with worker greenlets.

    Defaults the worker count to min(number of hosts, 10) and aborts when
    more workers than hosts were explicitly requested.
    """
    if not self.__threads:
        # Default: one worker per host, capped at 10.
        self.__threads = len(IPNetwork(self.__ip)) if len(IPNetwork(self.__ip)) <= 10 else 10
    if len(IPNetwork(self.__ip)) < int(self.__threads):
        print "Please decrease number of threads to number of hosts <= %s" % len(IPNetwork(self.__ip))
        exit()
    queue = JoinableQueue()
    # Enqueue every host address as a string.
    [queue.put(str(ip)) for ip in IPNetwork(self.__ip)]
    workers = [spawn(self.get_ip_info, queue, self.__apis) for t in range(int(self.__threads))]
    queue.join()
示例8: Importer
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class Importer(object):
    """Bulk-imports devices and datapoints into TempoIQ via worker greenlets.

    Work items are dicts of the form {'type': 'device'|'datapoints',
    'data': ...} pushed through a bounded JoinableQueue serviced by
    *pool_size* greenlets.
    """

    def __init__(self, creds, pool_size=POOL_SIZE):
        # creds: mapping with 'host', 'key' and 'secret' entries.
        self.client = get_session(creds['host'],
                                  creds['key'],
                                  creds['secret'])
        # Bound the queue so producers block instead of buffering everything.
        self.queue = JoinableQueue(maxsize=POOL_SIZE*2)
        for i in range(pool_size):
            gevent.spawn(self.worker)

    def worker(self):
        """Worker loop: dispatch queued jobs by their 'type' tag."""
        while True:
            job = self.queue.get()
            typ = job.get('type')
            try:
                if typ == 'device':
                    self._process_device(job['data'])
                elif typ == 'datapoints':
                    self._process_datapoints(job['data'])
            finally:
                # Always mark done so queue.join() cannot deadlock on errors.
                self.queue.task_done()

    def write_devices(self, devices):
        """Enqueue a create job per device, then wait for all to finish."""
        for device in devices:
            self.queue.put({'type': 'device', 'data': device})
        self.queue.join()

    def write_datapoints_from_file(self, infile):
        """Stream tab-separated datapoints from *infile* in 1000-line batches.

        Each line: device<TAB>sensor<TAB>timestamp<TAB>value.
        """
        points = {}
        lineno = 0
        for line in infile:
            lineno += 1
            (device, sensor, ts, val) = line.split('\t')
            # Group values as points[device][sensor] -> list of {'t','v'}.
            pts = points.setdefault(device, {}).setdefault(sensor, [])
            pts.append({'t': ts, 'v': float(val)})
            if lineno % 1000 == 0:
                self.queue.put({'type': 'datapoints', 'data': points})
                points = {}
        if points:
            # Flush the final partial batch.
            self.queue.put({'type': 'datapoints', 'data': points})
        self.queue.join()

    def _process_device(self, device, retries=5):
        """Create one device, retrying up to *retries* times on failure."""
        res = self.client.create_device(device)
        if res.successful != tempoiq.response.SUCCESS:
            # An already-existing key is not an error worth retrying.
            if 'A device with that key already exists' in res.body:
                print("Skipping creating existing device {}"
                      .format(device['key']))
                return
            if retries > 0:
                print("Retrying device create {}, error {}"
                      .format(device['key'], res.body))
                self._process_device(device, retries - 1)
            else:
                print("Retries exceeded; couldn't create device {}"
                      .format(device['key']))

    def _process_datapoints(self, write_request, retries=5):
        """Write one datapoint batch, retrying on server-side failure.

        Returns True when the write ultimately failed, False on success.
        """
        try:
            res = self.client.write(write_request)
        except Exception, e:  # Python 2 syntax; dump the payload, then re-raise
            print("ERROR with request: --->")
            print(json.dumps(write_request, default=WriteEncoder().default))
            raise e
        if res.successful != tempoiq.response.SUCCESS:
            if retries > 0:
                print("Retrying write, error was: {}".format(res.body))
                return self._process_datapoints(write_request, retries - 1)
            else:
                print("Retries exceeded; lost data!")
                print(json.dumps(write_request, default=WriteEncoder().default))
                return True
        return False
示例9: InterceptedStreamsMixin
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class InterceptedStreamsMixin(object):
    """
    Mixin class for GethProcess instances that feeds all of the stdout and
    stderr lines into some set of provided callback functions.
    """
    # Per-instance callback lists; initialised in __init__.
    stdout_callbacks = None
    stderr_callbacks = None

    def __init__(self, *args, **kwargs):
        super(InterceptedStreamsMixin, self).__init__(*args, **kwargs)
        self.stdout_callbacks = []
        self.stdout_queue = JoinableQueue()

        self.stderr_callbacks = []
        self.stderr_queue = JoinableQueue()

    def register_stdout_callback(self, callback_fn):
        # callback_fn is invoked with each stripped stdout line.
        self.stdout_callbacks.append(callback_fn)

    def register_stderr_callback(self, callback_fn):
        # callback_fn is invoked with each stripped stderr line.
        self.stderr_callbacks.append(callback_fn)

    def produce_stdout_queue(self):
        # Pump raw stdout lines into the queue until the stream hits EOF.
        for line in iter(self.proc.stdout.readline, b''):
            self.stdout_queue.put(line)
            gevent.sleep(0)  # explicit yield to other greenlets

    def produce_stderr_queue(self):
        for line in iter(self.proc.stderr.readline, b''):
            self.stderr_queue.put(line)
            gevent.sleep(0)

    def consume_stdout_queue(self):
        # Fan each queued line out to every registered callback.
        while True:
            line = self.stdout_queue.get()
            for fn in self.stdout_callbacks:
                fn(line.strip())
            gevent.sleep(0)

    def consume_stderr_queue(self):
        while True:
            line = self.stderr_queue.get()
            for fn in self.stderr_callbacks:
                fn(line.strip())
            gevent.sleep(0)

    def start(self):
        super(InterceptedStreamsMixin, self).start()
        # One producer/consumer greenlet pair per stream.
        gevent.spawn(self.produce_stdout_queue)
        gevent.spawn(self.produce_stderr_queue)
        gevent.spawn(self.consume_stdout_queue)
        gevent.spawn(self.consume_stderr_queue)

    def stop(self):
        super(InterceptedStreamsMixin, self).stop()
        # Give pending output up to 5 seconds to drain; never block shutdown.
        try:
            self.stdout_queue.join(5)
        except Timeout:
            pass

        try:
            self.stderr_queue.join(5)
        except Timeout:
            pass
示例10: BaseLogger
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class BaseLogger(Collected,Jobber):
    """\
    This class implements one particular way to log things.
    """
    storage = Loggers.storage
    q = None            # JoinableQueue feeding the writer job
    job = None          # handle of the writer job started by start_job
    ready = False       # True while the writer runs; None once deleted
    _in_flush = False

    def __init__(self, level):
        self.level = level

        global logger_nr
        logger_nr += 1

        if not hasattr(self,"name") or self.name is None:
            self.name = Name(self.__class__.__name__, "x"+str(logger_nr))

        super(BaseLogger,self).__init__()
        self._init()

    def _init(self):
        """Fork off the writer thread.
        Override this to do nothing if you don't have one."""

        self.q = JoinableQueue(100)
        self.start_job("job",self._writer)
        self.job.link(self.delete)
        if self.ready is False:
            self.ready = True
        else:
            self.stop_job("job") # concurrency issues?

    def _writer(self):
        # Drain the queue: FlushMe sentinels trigger _flush, everything
        # else is a (level, message...) tuple passed to _log.
        errs = 0
        for r in self.q:
            try:
                if r is FlushMe:
                    self._flush()
                else:
                    self._log(*r)
            except Exception as ex:
                errs += 1
                fix_exception(ex)
                from moat.run import process_failure
                process_failure(ex)
                # Bail out entirely after too many consecutive-ish errors.
                if errs > 10:
                    reraise(ex)
            else:
                if errs:
                    errs -= 1
            finally:
                self.q.task_done()
        self.q.task_done() # for the StopIter

    # Collection stuff
    def list(self):
        yield super(BaseLogger,self)
        yield ("Type",self.__class__.__name__)
        yield ("Level",LogNames[self.level])
        yield ("Queue",self.q.qsize())

    def info(self):
        return LogNames[self.level]+": "+self.__class__.__name__

    def delete(self, ctx=None):
        # Idempotent teardown: stop accepting work, end the writer loop.
        if self.ready:
            self.ready = None
            super(BaseLogger,self).delete(ctx)

        try:
            if self.q:
                # StopIteration sentinel terminates the `for r in self.q` loop.
                self.q.put(StopIteration,block=False)
        except Full:
            ## panic?
            pass
        if self.job is not None:
            self.job.join(timeout=1)
            self.stop_job("job")

    def _wlog(self, *a):
        # Non-blocking enqueue; a full queue is treated as fatal for
        # this logger and tears it down.
        try:
            self.q.put(a, block=False)
        except Full:
            ## panic?
            self.delete()

    def _log(self, level, *a):
        # Join all message parts into one space-separated string.
        a=" ".join(( x if isinstance(x,six.string_types) else str(x) for x in a))
        self._slog(level,a)

    # NOTE(review): _log calls self._slog(level, a) with two arguments but
    # this base signature takes only one; subclasses presumably override
    # with (level, a) — confirm before relying on the base signature.
    def _slog(self, a):
        raise NotImplementedError("You need to override %s._log or ._slog" % (self.__class__.__name__,))

    def _flush(self):
        pass

    def log(self, level, *a):
        if LogLevels[level] >= self.level:
            self._wlog(level,*a)
            if TESTING and not (hasattr(a[0],"startswith") and a[0].startswith("TEST")):
#......... remainder of this example omitted in the source .........
示例11: start_fluud
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def start_fluud():
    """Flood-test MongoDB with synthetic Ceilometer/CADF audit documents.

    Reads host/port (plus optional credentials) from the command line,
    then spawns 10 worker greenlets that each insert deep copies of a
    10000-document batch; a progress greenlet prints the collection
    count every 2 seconds.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('host', help='mongo host')
    parser.add_argument('port', help='mongo port')
    parser.add_argument('--login', help='mongo login')
    parser.add_argument('--password', help='mongo password')
    args = parser.parse_args()

    if args.login and args.password:
        # Credentials must be URL-quoted before embedding in the URI.
        login = urllib.quote_plus(args.login)
        password = urllib.quote_plus(args.password)
        uri = 'mongodb://{}:{}@{}:{}/'.format(login, password, args.host, args.port)
    else:
        uri = 'mongodb://{}:{}/'.format(args.host, args.port)

    client = MongoClient(uri)

    # One synthetic CADF "identity.authenticate" audit event template.
    template = {
        "first_sample_timestamp": dateutil.parser.parse("2015-09-02T13:08:20.314Z"),
        "last_sample_timestamp": dateutil.parser.parse("2015-09-02T13:08:20.314Z"),
        "metadata": {
            "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
            "initiator": {
                "typeURI": "service/security/account/user",
                "host": {
                    "address": "192.168.0.2"
                },
                "id": "openstack:610e7d74-16af-4358-9b77-5275194fa6e4",
                "name": "8b07b49216d243d2b49561759bd104f4"
            },
            "target": {
                "typeURI": "service/security/account/user",
                "id": "openstack:fc43ddcf-d147-466c-adfe-d60bd2b773ba"
            },
            "observer": {
                "typeURI": "service/security",
                "id": "openstack:a256def4-0a36-472e-95e5-e456db4e0681"
            },
            "eventType": "activity",
            "eventTime": "2015-09-02T13:08:20.256770+0000",
            "host": "identity.node-1",
            "action": "authenticate",
            "outcome": "success",
            "id": "openstack:00244b9a-1a43-48a5-b75e-9d68dd647487",
            "event_type": "identity.authenticate"
        },
        "meter": [
            {
                "counter_name": "identity.authenticate.success",
                "counter_unit": "user",
                "counter_type": "delta"
            }
        ],
        "project_id": None,
        "source": "openstack",
        "user_id": "openstack:610e7d74-16af-4358-9b77-5275194fa6e4"
    }

    # 10000 independent copies form one insert batch.
    data = [copy.deepcopy(template) for _ in range(10000)]

    def progress():
        # Periodically report how large the collection has grown.
        while True:
            print client.ceilometer.resource.count()
            sys.stdout.flush()
            sleep(2)

    spawn(progress)

    def worker():
        while True:
            q.get()
            try:
                # Unordered bulk insert of a fresh deep copy of the batch.
                client.ceilometer.resource.insert_many(copy.deepcopy(data), False)
            finally:
                q.task_done()

    q = JoinableQueue()
    for i in range(10):
        spawn(worker)

    for i in range(100):
        q.put(0)  # 100 jobs x 10000 docs = 1M inserted documents

    q.join()
示例12: _response_handler
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
def _response_handler (self, env, start_response):
    """handle HTTP request/response

    WSGI entry point: dispatches on PATH_INFO and returns a JoinableQueue
    used as the streamed response body — handlers put chunks, then
    StopIteration to terminate the iterable.
    """
    uri_path = env["PATH_INFO"]
    body = JoinableQueue()

    # A unit-of-work object may claim the request first.
    if self._uow and self._uow.handle_endpoints(self, uri_path, env, start_response, body):
        pass

    ##########################################
    # Worker endpoints

    elif uri_path == '/shard/config':
        # configure the service to run a shard
        Greenlet(self.shard_config, env, start_response, body).start()

    elif uri_path == '/shard/stop':
        # shutdown the service
        ## NB: must parse POST data specially, to avoid exception
        payload = loads(env["wsgi.input"].read())
        Greenlet(self.shard_stop, payload).start_later(1)

        # HTTP response starts first, to avoid error after server stops
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put("Goodbye\r\n")
        body.put(StopIteration)

    elif uri_path == '/queue/wait':
        # wait until all shards have finished sending task_queue requests
        Greenlet(self.queue_wait, env, start_response, body).start()

    elif uri_path == '/queue/join':
        # join on the task_queue, as a barrier to wait until it empties
        Greenlet(self.queue_join, env, start_response, body).start()

    elif uri_path == '/check/persist':
        ## NB: TODO checkpoint the service state to durable storage
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put("Bokay\r\n")
        body.put(StopIteration)

    elif uri_path == '/check/recover':
        ## NB: TODO restart the service, recovering from most recent checkpoint
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put("Bokay\r\n")
        body.put(StopIteration)

    ##########################################
    # HashRing endpoints

    elif uri_path == '/ring/init':
        # initialize the HashRing
        Greenlet(self.ring_init, env, start_response, body).start()

    elif uri_path == '/ring/add':
        ## NB: TODO add a node to the HashRing
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put("Bokay\r\n")
        body.put(StopIteration)

    elif uri_path == '/ring/del':
        ## NB: TODO delete a node from the HashRing
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put("Bokay\r\n")
        body.put(StopIteration)

    ##########################################
    # utility endpoints

    elif uri_path == '/':
        # dump info about the service in general
        start_response('200 OK', [('Content-Type', 'text/plain')])
        body.put(str(env) + "\r\n")
        body.put(StopIteration)

    else:
        # ne znayu
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        body.put('Not Found\r\n')
        body.put(StopIteration)

    return body
示例13: worker
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
m.update(response_content)
m.digest()
#Extract the links and add them to the queue. Using links_added
#counter to limit the number of links to fetch.
for link in re.findall('<a href="(http.*?)"', response_content):
if links_added < num_to_crawl:
links_added += 1
q.put(link)
#Worker spawned by gevent. Continuously gets links, works on them and marks
#them as done.
def worker(crawler_id):
    while True:
        item = q.get()
        try:
            do_work(item, crawler_id)
        finally:
            # Mark done even when do_work raises, so q.join() can return.
            q.task_done()

#Spawning worker threads.
crawler_id = 0
for i in range(num_worker_threads):
    gevent.spawn(worker, crawler_id)
    crawler_id += 1

# Seed the queue with the start URL, then wait for the crawl to finish.
q.put(source)
links_added += 1
q.join() # block until all tasks are done
示例14: len
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
pass
#print ('%s: %s bytes: %r' % (url, len(data), data[:50]))
def worker():
    # Pull URLs from the shared queue forever, fetching each head.
    while True:
        url = q.get()
        try:
            print_head(url)
        finally:
            # Mark done regardless of errors so q.join() can complete.
            q.task_done()

NUM_WORKER_THREADS = 50
NUM_REQUESTS = 5000

q = JoinableQueue()
for i in range(NUM_WORKER_THREADS):
    gevent.spawn(worker)

start_time = time.time()
# Enqueue one URL per request against the local test server.
for i in xrange(NUM_REQUESTS):
    url = 'http://127.0.0.1/' + str(i)
    q.put(url)
q.join() # block until all tasks are done
end_time = time.time()
show_stats( start_time, end_time, NUM_REQUESTS)
示例15: Service
# 需要導入模塊: from gevent.queue import JoinableQueue [as 別名]
# 或者: from gevent.queue.JoinableQueue import put [as 別名]
class Service(object):
    """Queue-driven processing unit: packages go in, *callback* runs on each.

    Successful results are forwarded to subscriber services and,
    optionally, to a shared result_queue; failures are collected in
    failed_queue.
    """

    def __init__(self, callback, **args):
        self.callback = callback
        self.result_queue = args.get('result_queue')
        self.package_queue = JoinableQueue()   # pending (package, sender) work
        self.failed_queue = []                 # (package, exception) failures
        self.env = args.get('env')
        self.main_greenlet = None
        self.pool = Pool(args.get('concurrency'))
        self.should_run = True
        self.subscribers = []                  # downstream Services to notify
        self.logger = Logger(self.name, args.get('log_level'))

    @property
    def name(self):
        # Lower-cased class name, used for logging and sender routing.
        return self.__class__.__name__.lower()

    def queue(self, package, sender_name, **data):
        # NOTE(review): the trailing `or True` makes this assert always
        # pass — it looks intended to require a 'path' from downloadmanager.
        assert (sender_name == 'downloadmanager' and data.get('path')) or True
        self.package_queue.put((package, (sender_name, data)))
        self.logger.level(3, ' * queue(from=%s, to=%s, package=%s, data=%s)',
                          sender_name, self.name, package, data)

    def consume(self):
        # Take one queued package and run it on the greenlet pool.
        package, sender_data = self.package_queue.get()
        self.pool.spawn(self._run_service, package, sender_data)
        self.logger.level(3, ' * %s.run(package=%s, sender_data=%s)',
                          self.name, package, sender_data)

    def subscribe(self, other):
        # Receive notifications for packages *other* processes successfully.
        other.subscribers.append(self)

    def loop(self):
        while self.should_run:
            self.consume()

    def start(self):
        self.main_greenlet = gevent.spawn(self.loop)

    def stop(self, force=False):
        # This will force the current iteration on `loop()` to be the last one,
        # so the thing we're processing will be able to finish;
        self.should_run = False

        # if the caller is in a hurry, we'll just kill everything mercilessly
        if force and self.main_greenlet:
            self.main_greenlet.kill()

    def _run_service(self, package, sender_data):
        # Run the callback; classify outcomes into skip / failure / success.
        try:
            data = self.callback(package, sender_data)
        except NotForMe:
            return
        except ReportableError as exc:
            self.failed_queue.append((package, exc))
            self.logger.level(0, "Error: %s", exc)
        except BaseException as exc:
            self.failed_queue.append((package, exc))
            self.logger.traceback(4,
                'failed to run %s (requested by:%s) for package %s:',
                self.name, sender_data[0], package, exc=exc)
        else:
            # Let's notify our subscribers
            for subscriber in self.subscribers:
                subscriber.queue(package, self.name, **(data or {}))

            # If the callback worked, let's go ahead and tell the world. If and
            # only if requested by the caller, of course.
            if self.result_queue:
                self.result_queue.put(package)