本文整理汇总了Python中queue.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: generate_in_background
def generate_in_background(generator, num_cached=10):
    """
    Runs a generator in a background thread, caching up to `num_cached` items.

    A daemon thread eagerly pulls items from `generator` into a bounded
    queue, so the consumer's work overlaps with item production.

    generator  : any iterable whose items are yielded through unchanged.
    num_cached : maximum number of items buffered ahead of the consumer.
    """
    import queue as queue_module
    import threading
    # FIX: the original rebound the name `queue` to the Queue instance,
    # shadowing the just-imported module; use distinct names instead.
    buffer = queue_module.Queue(maxsize=num_cached)
    sentinel = object()  # guaranteed unique reference marking end-of-stream

    # producer: drain `generator` into the buffer, then signal completion
    def producer():
        for item in generator:
            buffer.put(item)
        buffer.put(sentinel)

    # start producer (in a background thread)
    thread = threading.Thread(target=producer)
    thread.daemon = True  # never block interpreter exit
    thread.start()

    # run as consumer (read items from the buffer, in the current thread)
    item = buffer.get()
    while item is not sentinel:
        yield item
        item = buffer.get()
示例2: worker
def worker(id, queue):
    """Consume and print items from `queue`; exit after 3s of inactivity.

    id    : worker identifier, printed alongside each consumed item.
    queue : a queue.Queue-like object supporting get(timeout=...).
    """
    # The parameter shadows the `queue` module, so import Empty locally.
    from queue import Empty
    while True:
        try:
            obj = queue.get(timeout=3)
        except Empty:
            # FIX: was a bare `except:` that also swallowed errors raised by
            # print(); exit only when the queue stays empty for 3 seconds.
            break
        print(id, obj)
示例3: run_job
def run_job(obj, gpuid, jobfun, jobargs):
    # Worker loop: pull job paths from obj.queue and execute jobfun on the
    # given GPU until obj.shutdown becomes truthy.
    #
    # obj     : controller exposing .queue, .shutdown, .running(), .completed()
    # gpuid   : GPU device id forwarded to jobfun (also logged)
    # jobfun  : callable invoked as jobfun(*jobargs, path=path, gpuid=gpuid)
    # jobargs : positional arguments forwarded to jobfun
    queue = obj.queue
    while not obj.shutdown:
        path = None
        try:
            # Short timeout so the loop re-checks obj.shutdown about once a second.
            path = queue.get(timeout=1)
        except:
            # queue.Empty on timeout; deliberately ignored so the loop keeps polling.
            pass
        if path:
            try:
                logger.info("Running " + path + " on GPU device " + str(gpuid))
                obj.running(path)
                try:
                    jobfun(*jobargs, path=path, gpuid=gpuid)
                except:
                    # NOTE(review): a failed job is silently marked completed —
                    # presumably intentional best-effort; confirm with callers.
                    obj.completed(path)
                    queue.task_done()
                    continue
                logger.info("Completed " + path)
                obj.completed(path)
                queue.task_done()
            except:
                # Errors raised by the logging/bookkeeping calls above land here.
                logger.error("Error running job {}".format(path))
                obj.completed(path)
                queue.task_done()
                continue
    logger.info("Shutting down worker thread")
示例4: run
def run(self):
    """Pump messages from the shared queue out to every open connection."""
    while True:
        msg = queue.get()
        # NUL bytes are rewritten to '!' so the log line stays printable.
        sanitized = sub("\0", "!", msg)
        logging.info("Main server recieved: %s" % sanitized)
        for client in self.conns:
            client.send(msg)
示例5: blockingCallFromThread
def blockingCallFromThread(f, *a, **kw):
    """
    Run a function in the reactor from a thread, and wait for the result
    synchronously, i.e. until the callback chain returned by the function get a
    result.
    @param f: the callable to run in the reactor thread
    @type f: any callable.
    @param a: the arguments to pass to C{f}.
    @param kw: the keyword arguments to pass to C{f}.
    @return: the result of the callback chain.
    @raise: any error raised during the callback chain.
    """
    from twisted.internet import reactor
    import queue as queue_module
    # FIX: the original did `queue = queue.Queue()` without importing the
    # module, which raises UnboundLocalError; import it and use a new name.
    result_queue = queue_module.Queue()

    def _callFromThread():
        # Both success values and Failures travel through the queue via addBoth.
        result = defer.maybeDeferred(f, *a, **kw)
        result.addBoth(result_queue.put)

    reactor.callFromThread(_callFromThread)
    result = result_queue.get()  # blocks until the deferred fires
    if isinstance(result, failure.Failure):
        # FIX: the original wrapped this in `except Exception as e: raise e`,
        # a no-op re-raise that only obscured the traceback; re-raise directly.
        result.raiseException()
    return result
示例6: next_item
def next_item(self):
    """Return the next item from this object's queue, or None.

    Blocks for at most 5 seconds; any failure (timeout included) yields None.
    """
    try:
        return self._queue.get(block=True, timeout=5)
    except Exception:
        return None
示例7: ucs
def ucs(source, target, graph):
    """Uniform-cost graph search.

    Returns (distance, path) for the cheapest path from `source` to `target`,
    or None when `target` is unreachable.  `graph` must expose adj(v) and
    distance(u, v); build_path() reconstructs the path from the parent map.
    """
    import queue as queue_module
    # FIX: the original did `queue = queue.PriorityQueue()` without importing
    # the module, raising UnboundLocalError on the first call.
    fringe = queue_module.PriorityQueue()
    fringe.put((0, source))
    parent = {source: None}
    visited = {}
    while not fringe.empty():
        (d, v_in) = fringe.get()
        # Expand only if unseen, or reached again via a cheaper route.
        if v_in not in visited or d < visited[v_in]:
            if v_in == target:
                return (d, build_path(parent, target))
            for v_out in graph.adj(v_in):
                cost = graph.distance(v_in, v_out) + d
                if v_out not in visited:
                    fringe.put((cost, v_out))
                    parent[v_out] = v_in
            # FIX: record v_in's own distance `d`; the original stored the
            # loop-local `cost` (wrong value, and a NameError on leaf nodes).
            visited[v_in] = d
    return None
示例8: storeResults
def storeResults(queue, key):
    """Drain `queue` into the redis list at `key` until the 'END' sentinel."""
    server = redis.StrictRedis()
    # iter() with a sentinel stops as soon as queue.get() returns 'END',
    # matching the original's explicit compare-and-break loop.
    for result in iter(queue.get, 'END'):
        server.rpush(key, json.dumps(result))
示例9: default_filter
def default_filter(queue, *args):
    # Generator: yields lines pulled from `queue` until a falsy line
    # (e.g. '' or None) is received, which signals end-of-stream.
    # `*args` is accepted but unused here.
    while True:
        line = queue.get()
        if not line:
            # NOTE(review): `self` is not a parameter of this function — this
            # line raises NameError unless a global `self` exists; the body
            # looks lifted from a method.  Confirm against the caller.
            self.logger.debug('Process exiting (status_loop)')
            break
        yield line
示例10: run_job
def run_job(self, deviceid):
    # Worker loop: pull job directories off self._queue and execute each
    # job's generated job.sh until self._shutdown becomes truthy.
    # deviceid: device id to run on, or None to run without device pinning
    # (affects logging and the generated job script).
    queue = self._queue
    while not self._shutdown:
        path = None
        try:
            # 1s timeout so the loop re-checks self._shutdown regularly.
            path = queue.get(timeout=1)
        except:
            # queue.Empty on timeout — deliberately ignored to keep polling.
            pass
        if path:
            if deviceid is None:
                logger.info('Running ' + path)
            else:
                logger.info("Running " + path + " on device " + str(deviceid))
            self._setRunning(path)
            runsh = os.path.join(path, 'run.sh')
            jobsh = os.path.join(path, 'job.sh')
            # Wrap run.sh in a device-specific job script, then execute it.
            self._createJobScript(jobsh, path, runsh, deviceid)
            try:
                ret = check_output(jobsh)
                logger.debug(ret)
            except Exception as e:
                # Failed simulations are logged and still marked completed.
                logger.info('Error in simulation {}. {}'.format(path, e))
                self._setCompleted(path)
                queue.task_done()
                continue
            logger.info("Completed " + path)
            self._setCompleted(path)
            queue.task_done()
    logger.info("Shutting down worker thread")
示例11: sender
def sender(queue):
    """Batch hashes from `queue` into payloads and ship them downstream.

    Forever: build a payload of one serialized Header followed by 22 hash
    entries pulled from `queue`, then try up to 10 times to send it to
    ("downstream", 4444).  If all attempts fail, the payload is dumped to
    disk instead.
    """
    # TODO set time limit for checking
    header = Header()
    header.size = 1416
    while True:
        # Assemble one payload: serialized header + 22 hash entries.
        payload = bytearray()
        payload.extend(header.serialize())
        count = 0
        while count < 22:
            payload.extend(queue.get())
            count += 1
        try:
            print("sending sludge downstream")
            attempts = 0
            while attempts < 10:
                try:
                    sludge_outgoing = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sludge_outgoing.connect(("downstream", 4444))
                    sludge_outgoing.send(payload)
                    sludge_outgoing.close()
                    break
                except:
                    traceback.print_exc()
                    # FIX: was `attemps` (typo) — that NameError masked the
                    # intended give-up after the 10th failed attempt.
                    if attempts == 9:
                        raise Exception
                    attempts += 1
        except:
            # All retries failed: dump the payload to disk as a crude log.
            # TODO make error log
            with open('/home/sbartholomew/sludgeOut', 'wb') as f:
                f.write(payload)
示例12: multiCommand
def multiCommand(commands):
    # Merge several fade commands into the global frame queue.
    # Each command c is assumed to be (index_range, target, duration):
    #   c[0] = (start_pixel, end_pixel), c[1] = fade target, c[2] = seconds.
    # NOTE(review): relies on module globals frameRate, queueLock, queue,
    # pixels and bridgeValues defined elsewhere — confirm in the full file.
    maxAlterations = int(max([i[2] for i in commands]) * frameRate)
    queueList = []
    queueLock.acquire()
    # Drain all pending frames so they can be merged with the new commands.
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    # Number of frames missing to cover the longest command.
    appends = maxAlterations - len(queueList)
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    for c in commands:
        commandAlterations = int(c[2] * frameRate)
        for i in range(c[0][0], c[0][1]):
            start = pixels[i]
            # Generator yielding the per-frame interpolated values for pixel i.
            bridgeGenerator = bridgeValues(commandAlterations, start, c[1])
            for m in range(commandAlterations):
                queueList[m][i] = next(bridgeGenerator)
            # Wipe any leftover frames for this pixel beyond the new command.
            if appends < 0:
                for r in range(abs(appends)):
                    if i in queueList[commandAlterations + r]:
                        del queueList[commandAlterations + r][i]
    # Re-queue the merged frames in their original order.
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
示例13: _process_batch
def _process_batch():
    # One reduction job: take a batch of per-device gradient lists off the
    # queue, coalesce each device's grads, NCCL-reduce across local devices,
    # all-reduce the result across the process group, then scatter it back.
    # NOTE(review): relies on names from the enclosing scope (queue,
    # device_ids, reduction_streams, nccl_streams, group_id, helpers).
    dev_grad_batch, dev_events, job_event = queue.get()
    dev_coalesced = []
    # Coalesce the tensors on all devices and start a local reduction
    for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch, dev_events, reduction_streams):
        with torch.cuda.device(dev_id), torch.cuda.stream(stream):
            # Wait until this device's gradient copies have landed.
            stream.wait_event(event)
            coalesced = _flatten_tensors(grad_batch)
            dev_coalesced.append(coalesced)
    # Wait for all copies to complete before starting the NCCL kernel
    for stream in reduction_streams:
        stream.synchronize()
    nccl.reduce(dev_coalesced, root=device_ids[0], streams=nccl_streams)
    # From now on we're only going to work on the first device (from device_ids)
    grad_batch = dev_grad_batch[0]
    coalesced = dev_coalesced[0]
    reduce_stream = reduction_streams[0]
    with torch.cuda.stream(reduce_stream):
        reduce_stream.wait_stream(nccl_streams[0])
        # Average locally, then sum across the whole distributed group.
        coalesced /= dist.get_world_size()
        dist.all_reduce(coalesced, group=group_id)
        # Copy the reduced values back into the original gradient tensors.
        for grad, reduced in zip(grad_batch, _unflatten_tensors(coalesced, grad_batch)):
            grad.copy_(reduced)
    # Signal the submitting thread that this batch is done.
    job_event.set()
示例14: absoluteFade
def absoluteFade(indexes, rgb, fadeTime):
    '''Is given a color to fade to, and executes fade.

    indexes  : pixel indices to fade.
    rgb      : target color channels (clamped to eight-bit range).
    fadeTime : fade duration in seconds; falsy means a single frame.
    '''
    if not fadeTime:
        fadeTime = 1 / frameRate
    # FIX: the original `for c in rgb: c = makeEightBit(c)` discarded the
    # clamped values; rebuild the list so the clamping actually applies.
    rgb = [makeEightBit(c) for c in rgb]
    # Calculates how many individual fade frames are needed
    alterations = int(fadeTime * frameRate)
    queueList = []
    queueLock.acquire()
    # Drain the pending frame queue so it can be merged and re-queued.
    while not queue.empty():
        queueList.append(queue.get())
        queue.task_done()
    # Amount of frames that need to be added to queue
    appends = alterations - len(queueList)
    # fill out the queue with blank dictionaries to populate
    if appends > 0:
        for i in range(abs(appends)):
            queueList.append({})
    # Iterate down indexes, figure out what items in queue need to be altered
    for i in indexes:
        # INVESTIGATE: THIS MIGHT BE THE SOURCE OF FLASHING ISSUES AT THE START OF A COMMAND
        start = pixels[i]
        bridgeGenerator = bridgeValues(alterations, start, rgb)
        for m in range(alterations):
            queueList[m][i] = next(bridgeGenerator)
        # If this command overrides a previous command to the pixel, it should wipe any commands remaining
        if appends < 0:
            for r in range(abs(appends)):
                if i in queueList[alterations + r]:
                    del queueList[alterations + r][i]
    while queueList:
        queue.put(queueList.pop(0))
    queueLock.release()
示例15: worker
def worker(queue, args):
    """Run doaction() on each queued item; a None item ends the loop.

    task_done() is called once per processed item (not for the sentinel).
    """
    task = queue.get()
    while task is not None:
        doaction(task, args)
        queue.task_done()
        task = queue.get()