本文整理汇总了Python中multiprocessing.Queue.close方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.close方法的具体用法?Python Queue.close怎么用?Python Queue.close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.Queue
的用法示例。
在下文中一共展示了Queue.close方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: wrapped_dispatcher
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class wrapped_dispatcher(object):
    """Context manager running a WrappedDispatcher around a shared queue.

    On entry the dispatcher process is started and a PUSH/PULL zmq socket
    pair is connected; on exit everything is torn down again.
    """

    def __init__(self, enqueued=None, on_load=None):
        self.queue = Queue()
        options = {'queue': self.queue}
        if enqueued:
            options['enqueued_tasks'] = enqueued
        if on_load:
            options['on_daemon_load'] = on_load
        self.dispatcher = WrappedDispatcher(**options)
        self.context = None
        self.sockets = {}

    def __enter__(self):
        self.dispatcher.start()
        self.context = Context()
        self.sockets['in'] = self.context.socket(PUSH)
        self.sockets['out'] = self.context.socket(PULL)
        self.sockets['in'].connect(settings.ZTASKD_URL)
        self.sockets['out'].connect(settings.ZTASK_WORKER_URL)
        return (self.queue, self.sockets['in'], self.sockets['out'])

    def __exit__(self, exc_type, exc_value, traceback):
        self.dispatcher.terminate()
        self.context.destroy()
        self.queue.close()
示例2: _workerQpushTimer
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class _workerQpushTimer():
    """Periodically drains a staging queue into every worker's queue."""

    def __init__(self):
        # Seconds between sync passes.
        self.syncPeriod = 2
        self.timer = None
        self.Qinit()

    def Qinit(self):
        # Fresh staging queue shared with the main process.
        self.syncTmpQ = Queue()

    def Qflush(self):
        """Drain remaining items, then close and join the feeder thread."""
        while True:
            try:
                self.syncTmpQ.get(True, comm.FLUSH_TIMEOUT)
            except Empty:
                break
        self.syncTmpQ.close()
        self.syncTmpQ.join_thread()

    def enableTimer(self, workerPool):
        self.timer = Timer(self.syncPeriod, self.pushToWorkerQ, [workerPool])
        self.timer.start()

    def disableTimer(self):
        if self.timer is not None:
            self.timer.cancel()

    def pushToWorkerQ(self, workerPool):
        """Timer callback: fan out staged items to every worker, then re-arm."""
        while not comm.done.value:
            try:
                staged = self.syncTmpQ.get_nowait()
            except Empty:
                # Nothing staged right now.
                break
            for worker in workerPool:
                worker.queue.put_nowait(staged)
        if not comm.done.value:
            self.enableTimer(workerPool)
示例3: WorkerTest
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class WorkerTest(TestCase):
    """Ensures the worker correctly handles messages."""

    def setUp(self):
        # Bind a PUSH socket the wrapped worker will receive tasks from.
        self.queue = Queue()
        self.context = Context()
        self.socket = self.context.socket(PUSH)
        self.socket.bind(settings.ZTASK_WORKER_URL)
        self.worker = WrappedWorker(queue=self.queue)
        self.worker.start()

    def tearDown(self):
        self.worker.terminate()
        self.context.destroy()

    def test_exec(self):
        """Tests executing a task."""
        task_id = str(uuid4())
        self.socket.send_pyobj((task_id,))
        # The worker reports the task uuid back, then a truthy completion flag.
        self.assertEqual(self.queue.get(), task_id)
        self.assertTrue(self.queue.get())
        self.queue.close()
示例4: TestQueue
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class TestQueue(object):
    """Context manager that builds a multiprocessing.Queue of tests.

    On entry the queue is filled via ``test_source_cls.queue_tests``;
    entering yields ``None`` when there are no tests of the requested
    type.  On exit the queue is closed.

    Fixes relative to the original: removed the duplicate
    ``self.queue = None`` assignment in ``__init__`` and the unused
    ``has_tests`` local in ``__enter__``.
    """

    def __init__(self, test_source_cls, test_type, tests, **kwargs):
        self.test_source_cls = test_source_cls
        self.test_type = test_type
        self.tests = tests
        self.kwargs = kwargs
        self.queue = None

    def __enter__(self):
        if not self.tests[self.test_type]:
            # Nothing to queue for this test type.
            return None
        self.queue = Queue()
        self.test_source_cls.queue_tests(self.queue,
                                         self.test_type,
                                         self.tests,
                                         **self.kwargs)
        # There is a race condition that means sometimes we continue
        # before the tests have been written to the underlying pipe.
        # Polling the pipe for data here avoids that.
        self.queue._reader.poll(10)
        assert not self.queue.empty()
        return self.queue

    def __exit__(self, *args, **kwargs):
        if self.queue is not None:
            self.queue.close()
            self.queue = None
示例5: Channel
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class Channel(object):
    """Bi-directional queue pair between a coroutine and its peer.

    ``_in`` carries messages toward the owning coroutine and ``_out``
    carries messages away from it; which queue ``get``/``put`` use depends
    on whether the caller *is* the owning coroutine.
    """

    def __init__(self):
        self._in = Queue()
        self._out = Queue()

    def incoroutine(self):
        # True when the current coroutine is this channel's owner.
        return coroutine.self() is self

    def get(self):
        q = self._in if self.incoroutine() else self._out
        return q.get()

    def put(self, *args):
        q = self._out if self.incoroutine() else self._in
        return q.put(*args)

    def fileno(self):
        # Expose the underlying pipe fd so the channel can be select()ed on.
        q = self._in if self.incoroutine() else self._out
        return q._reader.fileno()

    def close(self):
        self._in.close()
        self._out.close()

    def alive(self):
        # BUG FIX: the original `bool(filter(alive, [...]))` is always True
        # on Python 3 because filter() returns a lazy iterator object.
        # any() has the intended semantics on both Python 2 and 3.
        return any(alive(r) for r in (self._in._reader, self._out._reader))
示例6: disc_inserted_queue
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
def disc_inserted_queue():
    """Yield a queue fed by a background disc-detection process.

    When the generator is resumed after the yield, the queue is closed
    and the worker process terminated.
    """
    events = Queue()
    worker = Process(target=process, args=(events,))
    worker.start()
    yield events
    events.close()
    worker.terminate()
示例7: instantiate
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
def instantiate(self, stream=None):
    """ Start a local worker process

    Blocks until the process is up and the center is properly informed

    :param stream: unused here; presumably kept for interface
        compatibility — TODO confirm against callers
    """
    if self.process and self.process.is_alive():
        raise ValueError("Existing process still alive. Please kill first")
    # The child reports its bound port and working dir back over this queue.
    q = Queue()
    self.process = Process(target=run_worker,
                           args=(q, self.ip, self.center.ip,
                                 self.center.port, self.ncores,
                                 self.port, self._given_worker_port,
                                 self.local_dir, self.services, self.name))
    # Daemonize so the worker dies with the nanny process.
    self.process.daemon = True
    self.process.start()
    while True:
        try:
            msg = q.get_nowait()
            # The child sends an Exception object on startup failure;
            # re-raise it here in the nanny.
            if isinstance(msg, Exception):
                raise msg
            self.worker_port = msg['port']
            assert self.worker_port
            self.worker_dir = msg['dir']
            break
        except queues.Empty:
            # Poll without blocking: yield to the IOLoop between attempts
            # (tornado-style coroutine).
            yield gen.sleep(0.1)
    logger.info("Nanny %s:%d starts worker process %s:%d",
                self.ip, self.port, self.ip, self.worker_port)
    q.close()
    # Coroutine return value (pre-Python-3 tornado convention).
    raise gen.Return('OK')
示例8: CaptureIO
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class CaptureIO(object):
    """Context manager that redirects stdout/stderr into a logging queue.

    While active, a LogThread drains the queue into *logger*; on exit the
    original stdio streams are restored and any unread messages are
    surfaced as warnings.  All of this is skipped when *do_capture* is
    falsy.
    """

    def __init__(self, logger, do_capture):
        self.logger = logger
        self.do_capture = do_capture
        self.logging_queue = None
        self.logging_thread = None
        self.original_stdio = None

    def __enter__(self):
        if not self.do_capture:
            return
        self.original_stdio = (sys.stdout, sys.stderr)
        self.logging_queue = Queue()
        self.logging_thread = LogThread(self.logging_queue, self.logger, "info")
        sys.stdout = LoggingWrapper(self.logging_queue, prefix="STDOUT")
        sys.stderr = LoggingWrapper(self.logging_queue, prefix="STDERR")
        self.logging_thread.start()

    def __exit__(self, *args, **kwargs):
        if not self.do_capture:
            return
        sys.stdout, sys.stderr = self.original_stdio
        if self.logging_queue is not None:
            self.logger.info("Closing logging queue")
            # Sentinel tells the log thread to finish.
            self.logging_queue.put(None)
            if self.logging_thread is not None:
                self.logging_thread.join(10)
            # Anything still queued was never logged; surface it.
            while not self.logging_queue.empty():
                try:
                    self.logger.warning("Dropping log message: %r", self.logging_queue.get())
                except Exception:
                    pass
            self.logging_queue.close()
            self.logger.info("queue closed")
示例9: Scheduler
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class Scheduler(object):
    """Fan-out producer/consumer demo built on a multiprocessing queue."""

    def __init__(self):
        self.queue = Queue()

    def consume(self, call_back):
        """Pull tasks until the ``None`` sentinel, invoking *call_back* on each."""
        while True:
            task = self.queue.get()
            if task is None:
                # Sentinel: no more work will arrive.
                self.queue.close()
                break
            # Simulate a little processing time per task.
            time.sleep(0.05)
            print("Queue got task: {}.".format(task))
            call_back(task)

    def produce(self, value):
        # Simulate a variable amount of work before enqueueing.
        time.sleep(random.uniform(0.1, 1.0))
        self.queue.put("TSK {}".format(value))

    def start(self, call_back, n_tasks=10):
        """Run one consumer and *n_tasks* producers, then shut down cleanly."""
        consumer = Process(target=self.consume, args=(call_back,))
        consumer.start()
        producers = [Process(target=self.produce, args=(i,))
                     for i in range(n_tasks)]
        for proc in producers:
            proc.start()
        for proc in producers:
            proc.join()
        # All producers are done; tell the consumer to stop.
        self.queue.put(None)
        consumer.join()
示例10: multi_threaded_generator
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
def multi_threaded_generator(generator, num_cached=10, num_threads=4):
    """Consume *generator* through a bounded multiprocessing queue.

    NOTE(review): despite the name, the producers are Processes, not
    threads, and each forked producer iterates its own copy of
    *generator* — with num_threads > 1 items would be duplicated and
    extra "end" sentinels left in the queue; confirm this is intended.
    Python 2 code (uses ``xrange``).
    """
    queue = MPQueue(maxsize=num_cached)

    # define producer (putting items into queue)
    def producer():
        for item in generator:
            queue.put(item)
            # pretend we are doing some calculations
            # sleep(0.5)
        # Sentinel telling the consumer this producer is exhausted.
        queue.put("end")

    # start producer (in a background thread)
    threads = []
    for _ in xrange(num_threads):
        # Re-seed so each child process gets a different random stream.
        np.random.seed()
        threads.append(Process(target=producer))
        threads[-1].daemon = True
        threads[-1].start()

    # run as consumer (read items from queue, in current thread)
    # print "starting while"
    item = queue.get()
    while item != "end":
        # print len(item)
        yield item
        item = queue.get()
    queue.close()
示例11: CommunicationChannels
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class CommunicationChannels(object):
    """Bi-directional communication channel backed by two queues.

    ``dump`` sends on the outbound queue and blocks until the peer
    acknowledges; ``load`` receives on the inbound queue and sends the
    acknowledgement back.  The child side calls ``set_child`` once so the
    two endpoints see the queues with opposite roles.
    """

    def __init__(self):
        self.qin = Queue()
        self.qout = Queue()

    def set_child(self):
        # Swap roles: the child's inbound queue is the parent's outbound one.
        self.qin, self.qout = self.qout, self.qin

    def close(self):
        self.qin.close()
        self.qout.close()

    def dump(self, obj):
        self.qout.put(obj, block=True)
        # Block until the peer confirms receipt.
        assert self.qin.get()

    def load(self, conn=None):
        obj = self.qin.get()
        # Acknowledge so the peer's dump() can return.
        self.qout.put(True)
        return obj
示例12: FakeSocket
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class FakeSocket(object):
    """In-memory stand-in for a socket, backed by two bounded queues."""

    def __init__(self):
        self.sent = Queue(100)      # data written via send()
        self.received = Queue(100)  # data staged for recv()

    def get(self):
        """Return a message that was sent through this socket.

        This is what the server would have received."""
        return self.sent.get()

    def put(self, msg):
        """Enqueue a message for the client to receive.

        Simulates data arriving over the socket."""
        self.received.put(msg)

    def send(self, data):
        """Socket interface for sending data; retrievable via get()."""
        self.sent.put(data)

    def recv(self, length = 0):
        """Socket interface for receiving data seeded via put().

        *length* is accepted for interface compatibility and ignored."""
        return self.received.get()

    def close(self):
        self.sent.close()
        self.received.close()
示例13: DataProcess
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
class DataProcess(Process):
    """Background process that keeps a small buffer of batches ready.

    ``run`` executes in the child process, repeatedly pulling batches
    from *data_pipeline* into a bounded queue; ``get_batch`` is called
    from the parent to consume them.
    """

    def __init__(self, data_pipeline, **get_batch_kwargs):
        super(DataProcess, self).__init__(name='neuralnilm-data-process')
        # Cross-process stop flag checked by the run() loop.
        self._stop = Event()
        # Small buffer: the child blocks once 3 batches are pending.
        self._queue = Queue(maxsize=3)
        self.data_pipeline = data_pipeline
        # Keyword arguments forwarded to data_pipeline.get_batch().
        self._get_batch_kwargs = get_batch_kwargs

    def run(self):
        # Child-process loop: produce batches until stopped.
        batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)
        while not self._stop.is_set():
            try:
                self._queue.put(batch)
            except AssertionError:
                # queue is closed
                break
            batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)

    def get_batch(self, timeout=30):
        """Return the next buffered batch, or raise if the child died.

        :raises RuntimeError: when the producer process is not running
        :raises queue.Empty: when no batch arrives within *timeout* seconds
        """
        if self.is_alive():
            return self._queue.get(timeout=timeout)
        else:
            raise RuntimeError("Process is not running!")

    def stop(self):
        # Signal the loop, close the queue, then force-kill and reap.
        self._stop.set()
        self._queue.close()
        self.terminate()
        self.join()
示例14: private_server
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
def private_server():
    """Run the OTP and AWS-credential workers until interrupted.

    Sleeps in a loop; any exception (e.g. KeyboardInterrupt) breaks the
    loop, after which each worker is sent a ``None`` sentinel and joined.
    """
    workers = []
    q = Queue()
    for target in (otp_worker, check_aws_cred_worker):
        proc = Process(target=target, args=(q,))
        proc.start()
        workers.append(proc)
    while True:
        try:
            time.sleep(20)
        except:
            # Deliberately bare: a KeyboardInterrupt (or anything else)
            # triggers shutdown.
            traceback.print_exc()
            break
    print('main proc exiting.')
    # One sentinel per worker so each can exit its loop.
    for _ in workers:
        q.put(None)
    q.close()
    for proc in workers:
        proc.join()
示例15: notify_queue
# 需要导入模块: from multiprocessing import Queue [as 别名]
# 或者: from multiprocessing.Queue import close [as 别名]
def notify_queue(jid, password, notify_jids):
    """Yield a queue serviced by a background notifier process.

    When the generator is resumed after the yield, the queue is closed
    and the notifier process terminated.
    """
    messages = Queue()
    notifier = Process(target=process, args=(messages, jid, password, notify_jids))
    notifier.start()
    yield messages
    messages.close()
    notifier.terminate()