本文整理汇总了Python中multiprocessing.queues.Queue类的典型用法代码示例。如果您正苦于以下问题:Python Queue类的具体用法?Python Queue怎么用?Python Queue使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Queue类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: force_stop
def force_stop(self):
    """Forcibly terminate the worker process and shut down all helper threads.

    Safe for the user to call directly.  After termination the log/monitor
    queues are recreated, because killing the process may leave them in a
    corrupted state.
    """
    self._stop_requested = True
    # Just in case the user calls this directly. Will signal all threads to
    # stop (save _com_thread).
    self._stop.set()
    # Sentinel values wake up the log/monitor threads blocked on get().
    self._log_queue.put(None)
    self._monitor_queue.put((None, None))
    # Set _force_stop to stop _com_thread.
    self._force_stop.set()
    # Terminate the process and make sure all threads stopped properly.
    self._process.terminate()
    self._log_thread.join()
    self._monitor_thread.join()
    self._com_thread.join()
    self.active = False
    # If work was in flight, record that the run was interrupted by the user.
    if self._processing.is_set():
        self.done = ('INTERRUPTED', 'The user forced the system to stop')
        self._processing.clear()
    # Discard the queues as they may have been corrupted when the process
    # was terminated.
    self._log_queue = Queue()
    self._monitor_queue = Queue()
示例2: process_init
def process_init(self):
    """Create the event/signal queues and spawn one PmakeSub per worker.

    Registers the event queue with PmakeManager under an id-derived name
    and marks every subprocess as available.
    """
    self.event_queue = Queue()
    self.event_queue_name = str(id(self))
    from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
    PmakeManager.queues[self.event_queue_name] = self.event_queue
    # name -> PmakeSub; each sub is always in exactly one of the sets below
    # (available + processing + aborted == subs.keys()).
    self.subs = {}
    self.sub_available = set()
    self.sub_processing = set()
    self.sub_aborted = set()
    self.signal_queue = Queue()
    db = self.context.get_compmake_db()
    storage = db.basepath  # XXX:
    logs_dir = os.path.join(storage, 'logs')
    for worker_index in range(self.num_processes):
        sub_name = 'w%02d' % worker_index
        log_path = os.path.join(logs_dir, '%s.log' % sub_name)
        make_sure_dir_exists(log_path)
        # The sub's own name doubles as its signal token.
        self.subs[sub_name] = PmakeSub(sub_name,
                                       signal_queue=self.signal_queue,
                                       signal_token=sub_name,
                                       write_log=log_path)
    self.job2subname = {}
    self.subname2job = {}
    # All subs are available at the beginning.
    self.sub_available.update(self.subs)
    self.max_num_processing = self.num_processes
示例3: Multiprocess
class Multiprocess(object):
# THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
# A CENTRAL POINT
def __init__(self, functions):
self.outbound = Queue()
self.inbound = Queue()
self.inbound = Queue()
#MAKE
#MAKE THREADS
self.threads = []
for t, f in enumerate(functions):
thread = worker(
"worker " + unicode(t),
f,
self.inbound,
self.outbound,
)
self.threads.append(thread)
def __enter__(self):
return self
#WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, a, b, c):
try:
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
except Exception, e:
Log.warning("Problem adding to inbound", e)
self.join()
示例4: main
def main():
result_queue = Queue()
crawler = CrawlerWorker(CanberraWealtherSpider(), result_queue)
crawler.start()
for item in result_queue.get():
#print datetime.datetime.now(),item
print item
示例5: compute_sp
def compute_sp(self):
    """Run self over the full coordinate range in both directions.

    Invokes self(queue, 0, len(coords), ascending, False) once with
    ascending=True and once with ascending=False, then concatenates the
    two results taken from the queue (in FIFO order).
    """
    from queue import Queue
    results = Queue()
    total = len(self.D['coords'])
    for ascending in (True, False):
        self(results, 0, total, ascending, False)
    return results.get() + results.get()
示例6: TestBlockingMethods
class TestBlockingMethods(unittest.TestCase):
    """Integration tests that run against a live SubprocessQueueServer."""

    def setUp(self):
        """Start a queue server on a fixed port and point the global PyRQ at it."""
        self.quiet=True
        self.random = Random()
        self._timers = []
        # Namespaces opened during a test; closed again in tearDown.
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="test")
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
            desiredPort=desiredPort,
            handlerClazz=Linkage.create(MockHandler),
            quiet=self.quiet
            # includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
        )
        PyRQIface.setGlobalPYRQ(self.r.details())
        # Block until the server subprocess reports that it is running.
        self.r.start().waitUntilRunning()
        pass

    def tearDown(self):
        """Close the dummy queue and every namespace opened during the test."""
        # Best-effort cleanup: the queue may already be closed.
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
            # ClosedError just means the namespace was already shut down.
            try: self.iface.close()
            except ClosedError, _e:
                pass
示例7: WorkerThreads
class WorkerThreads(object):
    """Thread pool that runs image pull/expire/import jobs asynchronously.

    Status changes are reported through a shared updater queue so a single
    consumer can observe the progress of all jobs.
    """

    def __init__(self, threads=1):
        """Initialize the thread pool and the status-update queue.

        threads -- number of worker threads in the pool
        """
        self.pools = ThreadPool(processes=threads)
        self.updater_queue = Queue()

    def get_updater_queue(self):
        """Return the queue on which status updates are posted."""
        return self.updater_queue

    def updater(self, ident, state, meta):
        """Post a status message for job *ident* to the update queue."""
        self.updater_queue.put({'id': ident, 'state': state, 'meta': meta})

    def _report_failure(self, updater, err):
        # Translate any exception into a FAILURE status update.
        resp = {'error_type': str(type(err)), 'message': str(err)}
        updater.update_status('FAILURE', 'FAILURE', response=resp)

    def pull(self, request, updater, testmode=0):
        """Run a pull job, converting exceptions into FAILURE updates."""
        try:
            pull(request, updater, testmode=testmode)
        except Exception as err:
            self._report_failure(updater, err)

    def expire(self, request, updater):
        """Run an image-removal job, converting exceptions into FAILURE updates."""
        try:
            remove_image(request, updater)
        except Exception as err:
            self._report_failure(updater, err)

    def wrkimport(self, request, updater, testmode=0):
        """Run an image-import job, converting exceptions into FAILURE updates."""
        try:
            img_import(request, updater, testmode=testmode)
        except Exception as err:
            self._report_failure(updater, err)

    def dopull(self, ident, request, testmode=0):
        """Kick off an asynchronous pull operation for job *ident*."""
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.pull, [request, updater],
                               {'testmode': testmode})

    def doexpire(self, ident, request, testmode=0):
        """Kick off an asynchronous expire operation for job *ident*."""
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.expire, [request, updater])

    def dowrkimport(self, ident, request, testmode=0):
        """Kick off an asynchronous import operation for job *ident*."""
        logging.debug("wrkimport starting")
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.wrkimport, [request, updater],
                               {'testmode': testmode})
示例8: setUp
def setUp(self):
    """Wire a Component to fresh notification/error queues with a mocked logger."""
    # Bounded to one entry so a second unconsumed notification would block.
    self.notif_queue = Queue(1)
    self.error_queue = Queue()
    connector = PostgresConnector(_POSTGRES_DSN)
    self.component = Component(
        self.notif_queue._reader,
        CommonErrorStrategy(),
        self.error_queue,
        connector,
    )
    # Silence component logging during the tests.
    self.component.log = MagicMock()
示例9: __init__
def __init__(self, maxsize=0):
    """Constructor
    Parameters:
    maxsize -- Maximum size of this Pile (0 means unbounded)
    """
    # Initialise the underlying multiprocessing Queue.
    Queue.__init__(self, maxsize)
    # Shared task counter ('i' = C int), guarded by its own lock so it can
    # be updated consistently from multiple processes.
    self._tasks = RawValue('i',0)
    self._tasks_lock = Lock()
示例10: put
def put(self, obj, block=True, timeout=None):
    """Put *obj* on the queue, bump the shared put counter, and wake waiters.

    Parameters mirror Queue.put: block/timeout control blocking behaviour
    when the queue is full.
    """
    Queue.put(self, obj, block, timeout)
    self._put_counter.value += 1
    # Wake everyone waiting on the not-empty condition.
    # NOTE(review): qsize() can race with a concurrent get(), so a waiter
    # might occasionally miss a wake-up here — confirm callers tolerate this.
    if self.qsize() != 0:
        self.cond_notempty.acquire()
        try:
            self.cond_notempty.notify_all()
        finally:
            self.cond_notempty.release()
示例11: start
def start(self, url):
    """Crawl the given LinkedIn profile URL and return the scraped items as dicts."""
    result_queue = Queue()
    spider = LinkedinSpider(url)
    # This part can be called as often as you want.
    CrawlerWorker(spider, result_queue).start()
    # get() delivers the complete list of scraped items in one message.
    return [dict(entry) for entry in result_queue.get()]
示例12: ServerSink
class ServerSink(iMockDebuggerSink):
    """Relays debugger-sink method calls to a remote peer via a QueueWriter.

    __getattribute__ intercepts every method named by iMockDebuggerSink and
    enqueues the call as a (name, args, kwargs) tuple; a background thread
    drains the queue and forwards each tuple to the remote end.
    """

    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        # Names of the interface methods to intercept in __getattribute__.
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        # Released by run() once the relay thread is up (see waitUntilRunning).
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.quiet= quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None

    def start(self):
        """Start the daemon relay thread and return a status string."""
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("ServerSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "server.sink.started"

    def close(self):
        """Stop the relay thread and close the writer and queue (best effort)."""
        self._terminate = True
        try: self.thread.join()
        except: pass
        try: self._qw.close()
        except: pass
        try: self._q.close()
        except: pass
        return "server.sink.closed"

    def waitUntilRunning(self, block=True, timeout=None):
        """Block until run() has released the start mutex; returns self."""
        self._startMutex.acquire(block=block, timeout=timeout)
        return self

    def __getattribute__(self, name):
        # Intercept interface methods: enqueue the call instead of executing it.
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            # NOTE(review): the returned function is unbound, so when called
            # as instance.method(a, ...) its 'self' parameter receives the
            # first call argument — verify this is intended.
            def wrapper(self, *args, **kwargs):
                # Fail fast if the payload cannot be pickled for transport.
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)

    def run(self, startMutex):
        """Relay loop: drain the local queue into the remote QueueWriter."""
        startMutex.release()
        while self._terminate==False:
            try:
                # Wake up once a second so _terminate is honoured promptly.
                data = self._q.get(block=True, timeout=1)
            except Empty: pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception, _e:
                    # A failing writer silently ends the relay loop.
                    break
示例13: getResult
def getResult(self, key):
    """Return at most self.maxResult IMDb search results for the given key."""
    result_queue = Queue()
    worker = CrawlerWorker(ImdbSpider(key), result_queue)
    worker.start()
    results = result_queue.get()
    # Trim to the configured maximum number of results.
    if len(results) > self.maxResult:
        results = results[:self.maxResult]
    logging.debug('%s results', len(results))
    return results
示例14: findCalibrationChessboard
def findCalibrationChessboard(image):
    """Find the 49 internal chessboard corners in *image* for calibration.

    Tries several erode/dilate preprocessing strengths in parallel worker
    processes and returns the four outer corners ordered [tl, tr, br, bl],
    or an empty list when no attempt finds all 49 corners.
    """
    findTimeout = 10  # NOTE(review): unused — presumably a leftover timeout value
    patternSize = (7, 7)  # Internal corners of 8x8 chessboard
    # Convert to grayscale and invert (gain -1, offset 255).
    grayImg = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.CvtColor(image, grayImg, cv.CV_RGB2GRAY)
    cv.AddWeighted(grayImg, -1, grayImg, 0, 255, grayImg)
    cornerListQueue = Queue()

    def getCorners(idx, inImg, cornersQueue):
        """Search for corners in image and put them in the queue"""
        print "{} Searching".format(idx)
        _, corners = cv.FindChessboardCorners(inImg,
                                              patternSize)
        print "{} found {} corners".format(idx, len(corners))
        saveimg(inImg, name="Chessboard_Search_{}".format(idx))
        cornersQueue.put(corners)

    # Launch one worker per preprocessing strength (i = 0, 3, 6, 9).
    for i in range(0, 12, 3):
        img = cv.CloneMat(grayImg)
        cv.Erode(img, img, iterations=i)
        cv.Dilate(img, img, iterations=i)
        # NOTE(review): a lambda target is not picklable under the 'spawn'
        # start method and captures i/img by reference; passing
        # target=getCorners with args=(i, img, cornerListQueue) would be
        # safer — verify against the platforms this must run on.
        p = multiprocessing.Process(target=lambda: getCorners(i, img, cornerListQueue))
        p.daemon = True
        p.start()
    corners = []
    # NOTE(review): i is reused as a countdown (last value 9) although only
    # four workers were launched; if no worker finds 49 corners this get()
    # can block forever once all results are consumed — confirm intended.
    while len(corners) != 49 and i > 0:
        corners = cornerListQueue.get(True)
        print "Got Result {}".format(i)
        i -= 1
    if len(corners) == 49:
        # Debug Image
        debugImg = cv.CreateMat(grayImg.rows, grayImg.cols, cv.CV_8UC3)
        cv.CvtColor(grayImg, debugImg, cv.CV_GRAY2RGB)
        for pt in corners:
            pt = (int(pt[0]), int(pt[1]))
            cv.Circle(debugImg, pt, 4, (255, 0, 0))
        saveimg(debugImg, name="Corners_Found")
        # //Debug Image
        # Figure out the correct corner mapping: sort the four extreme
        # corners by x+y so top-left sorts first and bottom-right last.
        points = sorted([corners[42], corners[0], corners[6], corners[48]], key=lambda pt: pt[0] + pt[1])
        if points[1][0] < points[2][0]:
            points[1], points[2] = points[2], points[1]  # swap tr/bl as needed
        (tl, tr, bl, br) = points
        warpCorners = [tl, tr, br, bl]
    else:
        print "Could not find corners"
        warpCorners = []
    return warpCorners
示例15: testDodgyActor
def testDodgyActor(self):
    """Spawn a DodgyActor limited to one request and verify it dies.

    Generator-style (yield-based) pulsar test: yields suspend the test
    until the scheduled operation completes.
    """
    queue = Queue()
    yield self.spawn(actor_class=DodgyActor, max_requests=1,
                     ioqueue=queue, on_event=on_event)
    proxy = pulsar.get_actor().get_actor(self.a.aid)
    self.assertEqual(proxy.name, 'dodgyactor')
    # Feed the actor its single allowed request.
    queue.put(('request', 'Hello'))
    # Poll for up to 20 scheduler turns for the actor to shut down.
    c = 0
    while c < 20:
        if not proxy.is_alive():
            break
        else:
            c += 1
            yield pulsar.NOT_DONE
    self.assertFalse(proxy.is_alive())