本文整理汇总了Python中multiprocessing.queues.Queue.get方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.get方法的具体用法?Python Queue.get怎么用?Python Queue.get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.queues.Queue
的用法示例。
在下文中一共展示了Queue.get方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: compute_sp
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def compute_sp(self):
    """Invoke this callable twice over the full coordinate range and
    return the sum of the two results posted to a local queue.
    """
    from queue import Queue

    results = Queue()
    n = len(self.D['coords'])
    # One invocation per flag value; each call is expected to put one
    # result onto the queue.
    for flag in (True, False):
        self(results, 0, n, flag, False)
    return results.get() + results.get()
示例2: main
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def main():
    """Run the Canberra weather spider in a crawler worker and print each
    scraped item.

    ``result_queue.get()`` blocks until the worker posts its full list of
    items.
    """
    result_queue = Queue()
    crawler = CrawlerWorker(CanberraWealtherSpider(), result_queue)
    crawler.start()
    for item in result_queue.get():
        # Parenthesized print works identically on Python 2 and 3
        # (the original bare print statement was Python-2-only).
        print(item)
示例3: test_spy
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def test_spy():
    """Check that MeasureSpy forwards watched entries and signals shutdown.
    """
    notifications = Queue()
    database = TaskDatabase()
    spy = MeasureSpy(queue=notifications, observed_database=database,
                     observed_entries=('test',))
    # A watched entry must produce a truthy message on the queue.
    database.notifier(('test', 1))
    assert notifications.get()
    # Entries that are not observed are silently dropped.
    database.notifier(('test2', 1))
    assert notifications.empty()
    # Closing the spy posts its ('', '') shutdown sentinel.
    spy.close()
    assert notifications.get() == ('', '')
示例4: UDPServer
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
class UDPServer(Process):
    """UDP server subprocess that tears itself down when any message is
    pushed onto its shutdown queue.

    NOTE(review): ``self.reactor`` is not assigned in this snippet --
    presumably set elsewhere in the class before __checkShutdown runs.
    """

    def __init__(self, queue):
        """Store the data queue, create the shutdown queue and start the
        process immediately.

        NOTE(review): starting from __init__ means construction forks the
        process as a side effect.
        """
        Process.__init__(self, name = "UDPServer")
        self.queue = queue
        self.shutdownQueue = Queue()
        self.start()

    def __checkShutdown(self):
        """Poll the shutdown queue; re-arm in 1 s if empty, else tear down.

        Fixed the Python-2-only ``except Empty, _e`` syntax.
        """
        try:
            self.shutdownQueue.get(block = False)
        except Empty:
            # No shutdown request yet -- poll again in one second.
            return self.reactor.callLater(1, self.__checkShutdown)
        self.shutdownQueue.close()
        if self.reactor.running:
            self.reactor.stop()
        self.queue.close()
示例5: test_multiprocess_tasks
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def test_multiprocess_tasks():
    """Stress-test the task queue: enqueue 10000 tasks, consume them with
    30 worker processes, and assert each task is processed exactly once.
    """
    wait_until_convenient()
    TAG = "message_q"

    def fetch_task(result_q):
        # Worker loop: drain TAG, reporting each item back on result_q.
        pid = os.getpid()
        done = 0
        for dq in q.listen(TAG, timeout=1):
            if dq:
                done += 1
                result_q.put({'pid': pid, 'data': dq})
                sleep(uniform(0.1, 0.5))  # sleep 0.1~0.5 seconds randomly
            elif q.count(TAG) == 0:
                return done  # the number of tasks done by this process

    test_items = range(0, 10000)  # enqueue 10000 tasks
    for i in test_items:
        q.enqueue(TAG, i + 1)
    while q.count(TAG) != len(test_items):  # wait until test data is ready
        wait_until_convenient()

    workers = []
    wait_until_convenient()
    result_q = Queue()
    began = timer()
    num_p = 30  # the number of processes to use
    for _ in range(0, num_p):
        proc = Process(target=fetch_task, args=(result_q,))
        workers.append(proc)
        proc.start()

    remaining = q.count(TAG)
    while remaining > 0:  # wait until the queue is consumed completely
        remaining = q.count(TAG)
        sys.stdout.write('\rRunning test_multiprocess_tasks - remaining %5d/%5d' % (remaining, len(test_items),))
        sys.stdout.flush()
        wait_until_convenient()

    # Drain the result queue and verify no task was handled twice.
    seen = set()
    drained = 0
    while not result_q.empty():
        record = result_q.get()
        data = record.get('data')
        drained += 1
        assert data not in seen, "failed test_multiprocess_tasks - data %s has been processed already" % (data, )
        seen.add(record.get('data'))

    result_q.close()
    result_q.join_thread()
    for w in workers:
        w.join()
    assert drained == len(test_items), "failed test_multiprocess_tasks - tasks are not complete %d/%d" % (drained, len(test_items), )
    finish = timer()
    print("\rOK test_multiprocess_tasks - %d done in %5d seconds" % (drained, finish - began))
示例6: get
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def get(self, block=True, timeout=None):
    """Dequeue one item; if the queue just drained to empty, wake every
    waiter blocked on ``cond_empty``.
    """
    item = Queue.get(self, block, timeout)
    if self.qsize() == 0:
        # `with` acquires and releases the condition, replacing the
        # explicit acquire/try/finally/release dance.
        with self.cond_empty:
            self.cond_empty.notify_all()
    return item
示例7: start
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def start(self, url):
    """Crawl the LinkedIn page at *url* and return the scraped items as
    plain dicts.
    """
    results = Queue()
    worker = CrawlerWorker(LinkedinSpider(url), results)
    worker.start()
    # The worker posts a single list of items; materialize each as a dict.
    return [dict(item) for item in results.get()]
示例8: ServerSink
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
class ServerSink(iMockDebuggerSink):
    """Debugger sink that forwards intercepted method calls to a remote
    peer through a QueueWriter.

    Calls to any method named by ``iMockDebuggerSink()._getMethods()`` are
    intercepted in __getattribute__ and queued as (name, args, kwargs)
    tuples; a daemon thread drains the queue into the writer.
    """

    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        self._startMutex = Semaphore(0)  # released once run() begins
        self._q = Queue()
        self.quiet = quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None

    def start(self):
        """Spawn the daemon drain thread; return a status string."""
        t = threading.Thread(target=self.run, args=[self._startMutex])
        # Modernised from the deprecated setName()/setDaemon() calls.
        t.name = "ServerSink.%(P)s"%{"P":self._peerName}
        t.daemon = True
        self.thread = t
        self.thread.start()
        return "server.sink.started"

    def close(self):
        """Stop the drain thread and close writer and queue, best effort.

        Narrowed the original bare ``except:`` clauses so that
        KeyboardInterrupt/SystemExit are no longer swallowed.
        """
        self._terminate = True
        try:
            self.thread.join()
        except Exception:
            pass
        try:
            self._qw.close()
        except Exception:
            pass
        try:
            self._q.close()
        except Exception:
            pass
        return "server.sink.closed"

    def waitUntilRunning(self, block=True, timeout=None):
        """Block until run() has released the start semaphore; return self."""
        self._startMutex.acquire(block=block, timeout=timeout)
        return self

    def __getattribute__(self, name):
        # Intercept debugger-interface methods: instead of executing, they
        # enqueue a (name, args, kwargs) message for the drain thread.
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            # NOTE(review): wrapper is returned unbound, so the caller's
            # first positional argument lands in 'self' and is dropped from
            # the message -- confirm callers pass a leading peer argument
            # on purpose.
            def wrapper(self, *args, **kwargs):
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)

    def run(self, startMutex):
        """Drain loop: forward queued messages until close() is called.

        Fixed the Python-2-only ``except Exception, _e`` syntax.
        """
        startMutex.release()
        while not self._terminate:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:
                pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception:
                    # Writer failed (peer gone?) -- stop draining.
                    break
示例9: getResult
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def getResult(self, key):
    """Return IMDb search results for *key*, capped at self.maxResult."""
    spider = ImdbSpider(key)
    result_queue = Queue()
    CrawlerWorker(spider, result_queue).start()
    results = result_queue.get()
    # Trim in place; deleting an empty slice is a no-op, so no length
    # check is needed.
    del results[self.maxResult:]
    logging.debug('%s results', len(results))
    return results
示例10: findCalibrationChessboard
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def findCalibrationChessboard(image):
    """Find the four outer corners of a calibration chessboard in *image*.

    Several eroded/dilated variants of the inverted grayscale image are
    searched in parallel worker processes; the first result containing all
    49 internal corners wins.  Returns the corners ordered
    [tl, tr, br, bl], or [] if no full board was found.
    """
    findTimeout = 10  # NOTE(review): unused -- likely meant as a get() timeout
    patternSize = (7, 7)  # Internal corners of 8x8 chessboard
    grayImg = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.CvtColor(image, grayImg, cv.CV_RGB2GRAY)
    # Invert the grayscale image: out = -1*src + 255.
    cv.AddWeighted(grayImg, -1, grayImg, 0, 255, grayImg)
    cornerListQueue = Queue()

    def getCorners(idx, inImg, cornersQueue):
        """Search for corners in image and put them in the queue"""
        print("{} Searching".format(idx))
        _, corners = cv.FindChessboardCorners(inImg,
                                              patternSize)
        print("{} found {} corners".format(idx, len(corners)))
        saveimg(inImg, name="Chessboard_Search_{}".format(idx))
        cornersQueue.put(corners)

    for i in range(0, 12, 3):
        img = cv.CloneMat(grayImg)
        cv.Erode(img, img, iterations=i)
        cv.Dilate(img, img, iterations=i)
        # Bug fix: pass arguments via args= instead of closing over the
        # loop variables in a lambda -- the lambda is late-binding (risking
        # every process seeing the final i/img) and is not picklable under
        # the 'spawn' start method.
        p = multiprocessing.Process(target=getCorners,
                                    args=(i, img, cornerListQueue))
        p.daemon = True
        p.start()

    corners = []
    # i is left at 9 by the loop above; collect results until a full set
    # of 49 corners arrives or the countdown runs out.
    while len(corners) != 49 and i > 0:
        corners = cornerListQueue.get(True)
        print("Got Result {}".format(i))
        i -= 1
    if len(corners) == 49:
        # Debug Image
        debugImg = cv.CreateMat(grayImg.rows, grayImg.cols, cv.CV_8UC3)
        cv.CvtColor(grayImg, debugImg, cv.CV_GRAY2RGB)
        for pt in corners:
            pt = (int(pt[0]), int(pt[1]))
            cv.Circle(debugImg, pt, 4, (255, 0, 0))
        saveimg(debugImg, name="Corners_Found")
        # //Debug Image
        # Figure out the correct corner mapping: sorting the four extreme
        # corners by x+y puts tl first and br last.
        points = sorted([corners[42], corners[0], corners[6], corners[48]],
                        key=lambda pt: pt[0] + pt[1])
        if points[1][0] < points[2][0]:
            points[1], points[2] = points[2], points[1]  # swap tr/bl as needed
        (tl, tr, bl, br) = points
        warpCorners = [tl, tr, br, bl]
    else:
        print("Could not find corners")
        warpCorners = []
    return warpCorners
示例11: yk_monitor
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def yk_monitor(self, mon_l):
    """Watch the given YubiKey xinput device ids until one emits an event
    or TIMEOUT seconds elapse.
    """
    # One `xinput test <id>` per device, run concurrently in a single shell.
    mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
    monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)
    stdout_queue = Queue()
    reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
    reader.start()
    fired = False
    deadline = time.time() + TIMEOUT
    while not reader.eof and time.time() < deadline:
        # Any queued stdout line means a key produced events.
        while stdout_queue.qsize() > 0:
            stdout_queue.get()  # emptying queue
            fired = True
            time.sleep(.04)
        if fired:
            print('YubiKey triggered. Now disabling.')
            break
        time.sleep(.001)
    if not fired:
        print('No YubiKey triggered. Timeout.')
示例12: AsynchronousSolver
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
# NOTE(review): indentation was lost when this snippet was extracted, and the
# final get() method is truncated (it ends at a dangling `else:`).  Code lines
# are kept byte-identical below; only comments were added.
class AsynchronousSolver(threading.Thread):
# Class-wide counter handing out worker thread ids.
tId = itertools.count()
r"""
@summary: Manages a suit of solver workers via queues, we could manage via a queue-like interface (PyRQ !).
"""
def __init__(self, solvers, lock):
self._solvers = solvers
self._lock = lock
# _q carries results back to callers of get(); _qDistributor feeds workers.
self._q = Queue()
self._qDistributor = Queue()
self._queues = {}
self._type = SolverImplType.THREADED
super(AsynchronousSolver, self).__init__()
self._go()
def _go(self):
# Spawn one AsyncSolver worker per configured solver count, each with its
# own queue and a clone of the model, then start this managing thread.
model = self._solvers._getModel()
for _ in xrange(self._solvers._count):
q = Queue()
tId = AsynchronousSolver.tId.next()
asyncSolverImpl = AsyncSolverFactory(self._type, tId, self._qDistributor, q, model.clone())
self._queues[tId] = (asyncSolverImpl, q)
asyncSolverImpl.start()
self.setName("AsynchronousSolver")
self.setDaemon(True)
self.start()
def _terminate(self):
# Best-effort shutdown: tell workers to abort, then terminate them.
self._informAbort()
for context in self._queues.values():
(thread, q) = context
try: q.put(Abort())
except: pass
try: thread.terminate()
except: pass
self._queues = {}
def get(self, *args, **kwargs):
# Proxy to the result queue; Empty propagates, other errors are logged.
try:
data = self._q.get(*args, **kwargs)
except Empty:
raise
except Exception, e:
print "AsynchronousSolver get: error:\n%(T)s"%{"T":traceback.format_exc()}
raise e
# NOTE(review): snippet truncated here -- the success branch is missing.
else:
示例13: get
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def get(self):
    """Pop one element, retrying transient IOErrors with exponential backoff.

    The delay starts at 1 ms and doubles each retry; once it would reach
    1 s (about 10 failed attempts), an IOError('Unrecoverable error') is
    raised.  Blocking semantics are otherwise those of Queue.get.
    """
    delay = 1e-3
    while delay < 1:
        try:
            return Queue.get(self)
        except IOError:
            logger.warning('IOError encountered in SafeQueue get()')
            try:
                time.sleep(delay)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed; a failed sleep just
                # shortens the backoff.
                pass
            delay *= 2
    raise IOError('Unrecoverable error')
示例14: parallel_sort
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def parallel_sort(bam, out, n_workers):
    """Sort a BAM file in parallel: workers decompress and sort their share
    of blocks into shared buffers, the read lists are merge-sorted here,
    and the result is rewritten as BGZF blocks.
    """
    reader = BGZFReader(bam)
    mem = reader.uncompressed_size
    buf = RawArray(ctypes.c_char, mem)  # NOTE(review): appears unused; kept for parity
    results = Queue()
    workers = []
    block_allocs = chunk(reader.blocks, n_workers)
    # Per-worker output offsets: running sums of block offsets, shifted by
    # one so the first worker starts at 0.
    offsets = [0] + list(accumulate(sum(b.offset for b in blocks)
                                    for blocks in block_allocs))[:-1]
    # Uncompressed size of each worker's share -> one shared buffer apiece.
    ary_szs = [sum(b.size_u for b in blocks) for blocks in block_allocs]
    bufs = [RawArray(ctypes.c_char, size) for size in ary_szs]
    work = zip(chunk(reader.blocks, n_workers), offsets, bufs)
    for i, (blocks, off, wbuf) in enumerate(work):
        p = Process(target=sort_read_ary,
                    args=(i, bam, blocks, off, wbuf, results))
        workers.append(p)
        p.start()
    # Each worker posts one list of reads; concatenate them all.
    combined = []
    for _ in workers:
        combined += results.get(True)
    logging.debug("Starting combined sort on %i reads" % len(combined))
    combined.sort()
    logging.debug("Finished combined sort")
    for p in workers:
        p.join()
        logging.debug("Returned from " + str(p))
    hdr = RawBAM(gzip.GzipFile(bam), header=True).rawheader
    with open(out, 'wb') as f:
        write_bgzf_block(f, hdr)
        # Re-block the sorted reads, copying each record (plus its 4-byte
        # length prefix) out of its worker's shared buffer.
        for creads in grouper(READS_PER_BLOCK, combined):
            data = ""
            for cr in creads:
                data += bufs[cr.worker_num][cr.ptr:(cr.ptr + cr.bs + 4)]
            write_bgzf_block(f, data)
        write_bam_eof(f)
示例15: mp_factorizer
# 需要导入模块: from multiprocessing.queues import Queue [as 别名]
# 或者: from multiprocessing.queues.Queue import get [as 别名]
def mp_factorizer(nums, nprocs):
    """Factorize every number in *nums* across *nprocs* worker processes.

    Returns a single dict mapping each number to its factor list.
    """
    def worker(batch, out_q):
        # Each worker factors its batch and posts one {n: factors} dict.
        out_q.put({n: factorize_naive(n) for n in batch})

    out_q = Queue()
    # Ceiling division so every number lands in exactly one batch.
    chunksize = int(math.ceil(len(nums) / float(nprocs)))
    workers = []
    for i in range(nprocs):
        batch = nums[chunksize * i:chunksize * (i + 1)]
        p = multiprocessing.Process(target=worker, args=(batch, out_q))
        workers.append(p)
        p.start()
    # Exactly one result dict arrives per worker; merge them all.
    resultdict = {}
    for _ in range(nprocs):
        resultdict.update(out_q.get())
    # Wait for all worker processes to finish.
    for p in workers:
        p.join()
    return resultdict