This article collects typical code examples of the queue.get method in Python. If you are unsure exactly what queue.get does or how to use it, the curated samples below may help; you can also explore further usage examples of the queue module, where this method is defined.
The sections below present 15 code examples of queue.get, sorted by popularity by default.
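Before the project examples, here is a minimal self-contained sketch of the three common ways to call Queue.get: blocking, blocking with a timeout, and non-blocking. The queue and items are illustrative only.

import queue

q = queue.Queue()
q.put("task-1")

# Blocking get: waits until an item is available.
item = q.get()
q.task_done()

# get() with a timeout raises queue.Empty if nothing arrives in time.
try:
    item = q.get(timeout=3)
except queue.Empty:
    print("queue stayed empty for 3 seconds")

# Non-blocking get raises queue.Empty immediately when the queue is empty.
try:
    item = q.get_nowait()
except queue.Empty:
    print("queue is empty right now")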
Example 1: make_web
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            # tobytes() replaces the deprecated ndarray.tostring()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tobytes() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port')
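Example 1 only reads frames from the queue; how they get there depends on the surrounding project. A hypothetical producer is sketched below (the capture loop, queue size, and names are assumptions, not part of the original source):

import queue
import threading

import cv2

def capture_frames(frame_queue):
    # Hypothetical producer: push webcam frames into the queue that make_web() reads from.
    cap = cv2.VideoCapture(0)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        frame_queue.put(frame)

frame_queue = queue.Queue(maxsize=10)
threading.Thread(target=capture_frames, args=(frame_queue,), daemon=True).start()
make_web(frame_queue)  # serves the MJPEG stream on port 8889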
Example 2: do_log_chat
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
# Declared async because build_message_html is awaited below.
async def do_log_chat(time, event, metadata):
    """
    Add a new message to the chat log.
    """
    # Don't log blank lines or server commands like .timeout
    message = event.arguments[0]
    if not message or (message[0] in "./" and message[1:4].lower() != "me "):
        return
    source = irc.client.NickMask(event.source).nick
    html = await build_message_html(time, source, event.target, event.arguments[0],
                                    metadata.get('specialuser', []), metadata.get('usercolor'),
                                    metadata.get('emoteset', []), metadata.get('emotes'),
                                    metadata.get('display-name'))
    with lrrbot.main.bot.engine.begin() as conn:
        conn.execute(lrrbot.main.bot.metadata.tables["log"].insert(),
            time=time,
            source=source,
            target=event.target,
            message=event.arguments[0],
            specialuser=list(metadata.get('specialuser', [])),
            usercolor=metadata.get('usercolor'),
            emoteset=list(metadata.get('emoteset', [])),
            emotes=metadata.get('emotes'),
            displayname=metadata.get('display-name'),
            messagehtml=html,
            msgid=metadata.get('id'),
        )
Example 3: _reflect_end_run
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def _reflect_end_run(self, record: Mapping) -> None:
    """Reflect an end run sigil into the complete topic

    This is handled directly in the consumer thread, rather than as part of the
    work queue, to ensure that the offset is not committed to kafka until after
    processing is completed and it has been successfully reflected.

    Parameters
    ----------
    record : dict
        Deserialized end run sigil
    """
    log.info('reflecting end sigil for run %s and partition %d' %
             (record['run_id'], record['partition']))
    # Wait for everything to at least start processing. We don't
    # actually know when the workers are finally idle.
    self.work_queue.join()
    future = self.ack_all_producer.send(
        self.topic_complete, json.dumps(record).encode('utf8'))
    future.add_errback(lambda e: log.critical(
        'Failed to send the "end run" message: %s', e))
    # Wait for ack (or failure to ack)
    future.get()
Example 4: run
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def run(self):
    _ClientHandler.log_info("csocketsendThread(); socket.send thread start")
    self._tx = None
    while self.__threadrun == True:
        try:
            # get queue-value in blocking mode
            self._tx = self._queue.get(True)
            self._queue.task_done()
        except:
            self.__threadrun = False
            _ClientHandler.log_critical("csocketsendThread(); Error on queue.get()")
            raise
        try:
            self._request.sendall(bytes(self._tx))
        except:
            self.__threadrun = False
            _ClientHandler.log_critical("csocketsendThread(); Error on socket.send")
            raise
    _ClientHandler.log_info("csocketsendThread(); socket.send thread terminated")
Example 5: decode
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
# The 'queue' parameter shadows the queue module, so import the Empty
# exception directly; 'except queue.Empty' would fail inside this function.
from queue import Empty

def decode(queue, log_probs, decoder, index2label):
    while not queue.empty():
        try:
            video = queue.get(timeout=3)
            score, labels, segments = decoder.decode(log_probs[video])
            # save result
            with open('results/' + video, 'w') as f:
                f.write('### Recognized sequence: ###\n')
                f.write(' '.join([index2label[s.label] for s in segments]) + '\n')
                f.write('### Score: ###\n' + str(score) + '\n')
                f.write('### Frame level recognition: ###\n')
                f.write(' '.join([index2label[l] for l in labels]) + '\n')
        except Empty:
            pass

### read label2index mapping and index2label mapping ###########################
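A hedged sketch of how such a worker might be driven: fill a queue with video identifiers and drain it from several threads. The video names are placeholders, and log_probs, decoder, and index2label are assumed to be prepared by the surrounding script, as in the original project.

import queue
import threading

video_queue = queue.Queue()
for video in ["video_0001", "video_0002", "video_0003"]:  # placeholder identifiers
    video_queue.put(video)

# decode() writes one file per video into results/, so that directory must exist.
workers = [threading.Thread(target=decode,
                            args=(video_queue, log_probs, decoder, index2label))
           for _ in range(4)]
for w in workers:
    w.start()
for w in workers:
    w.join()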
Example 6: test_concurrent_cloudnodiness_reset_request
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def test_concurrent_cloudnodiness_reset_request(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    #time.sleep(2)
    test_record = self.test_table.get(test_method_name)
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            request_body = test_functions.get("http_request_body")
            try:
                json_request_body = json.loads(request_body)
                # reset the request body to file arguments for next iteration
                json_request_body['cloudagent_ip'] = argument["ip_file"]
                json_request_body['cloudagent_port'] = argument["port_file"]
                test_functions['http_request_body'] = json.dumps(json_request_body)
            except Exception as e:
                self.fail("Problem in test_concurrent_cloudnodiness_modify_request() replacing cloudagent_ip or cloudagent_port. Error: %s" % e)
Example 7: check_test_persistance_file_write
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def check_test_persistance_file_write(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            try:
                with open(cv_persistence_filename, "r") as persistance_file:
                    file_contents = persistance_file.read()
                    json_content = json.loads(file_contents)
                    if len(json_content) != 1 or json_content.get(uuid_str) is None:
                        self.fail("Unexpected persistence file contents.")
            except Exception as e:
                self.fail("Problem reading persistence file after POST. Error: %s" % e)
            try:
                with open(cv_persistence_filename + ".bak", "r") as backup_persistance_file:
                    backup_file_contents = backup_persistance_file.read()
                    json_backup_content = json.loads(backup_file_contents)
                    if len(json_backup_content) != 0:
                        self.fail("Unexpected backup persistence file contents.")
            except Exception as e:
                self.fail("Problem reading backup persistence file after POST. Error: %s" % e)
Example 8: check_test_persistance_file_load
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def check_test_persistance_file_load(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    uuid_str = argument
    # perform each of the test functions and store the results
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)
            # test to make sure these two keys (and values) are in the return
            if len(jsondecoded) != 1 or jsondecoded.get(uuid_str) is None:
                self.fail("Expected " + uuid_str + " to be in the list of active agent_ids")

# def do_mock_for_test_cloudverifier_tenant_provide_v(self, argument):
#     global text_callback
#     nonce = tpm_initialize.random_password(20)
#     tpm_policy = {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}
#     #theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier" + "?nonce=" + nonce + "&mask=" + tpm_policy['mask']
#     theurl = 'http://' + cloudagent_ip + ':' + cloudagent_port + "/v1/quotes/cloudverifier"
#     with requests_mock.Mocker(real_http=True) as m:
#         m.get(requests_mock.ANY, text=text_callback)
Example 9: check_validate_test_cloudverifier_tenant_provide_v
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def check_validate_test_cloudverifier_tenant_provide_v(self, test_method_name, test_function_name, state_change_or_validation, test_iteration, argument):
    test_record = self.test_table.get(test_method_name)
    # lookup test data and compare the results to canned values
    for test_functions in test_record[state_change_or_validation]:
        if test_functions.get("function_name") == test_function_name:
            target_body = test_functions.get("http_result_body_actual")
            jsondecoded = json.loads(target_body)
            v = jsondecoded.get("v")
            ip = jsondecoded.get("ip")
            port = jsondecoded.get("port")
            tpm_policy = jsondecoded.get("tpm_policy")
            if v is None or v != "nsRIy93UeeAi3GhAxpEcMH6R7OmaB7ArBdn2bEgyEwU=":
                self.fail("Returned v from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if ip is None or ip != "127.0.0.1":
                self.fail("Returned ip from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if port is None or port != "8882":
                self.fail("Returned port from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
            if tpm_policy is None or tpm_policy != {"00": "0000000000000000000000000000000000000000", "mask": "0x400801", "22": "ffffffffffffffffffffffffffffffffffffffff"}:
                self.fail("Returned tpm_policy from instance 06480EC4-6BF3-4F00-8323-FE6AE5868297 was not correct.")
Example 10: execute_test_definition
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def execute_test_definition(self):
    test_record = self.test_table.get(self._testMethodName)
    prerun_function_dict = test_record.get("prerun_function")
    if prerun_function_dict is not None:
        prerun_function_name = prerun_function_dict.get("name")
        prerun_function_args = prerun_function_dict.get("args")
        function_return = getattr(self, prerun_function_name)(prerun_function_args)
    self.execute_test_function_set("setup_functions")
    self.execute_test_function_set("state_change_functions")
    self.execute_test_function_set("state_validation_functions")
    postrun_function_dict = test_record.get("postrun_function")
    if postrun_function_dict is not None:
        postrun_function_name = postrun_function_dict.get("name")
        postrun_function_args = postrun_function_dict.get("args")
        function_return = getattr(self, postrun_function_name)(postrun_function_args)
Example 11: process_5p
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def process_5p(chrom, positions, strand, vertex_IDs, gene_ID, gene_starts, edge_dict,
               locations, run_info):
    """ Conduct permissive match for 5' end and return assigned vertex,
        edge, and distance """
    # First get a permissively matched start vertex
    start_vertex, diff_5p, known_start = permissive_match_with_gene_priority(
        chrom, positions[0], strand, positions[1],
        "start", gene_ID, gene_starts,
        locations, run_info)
    if start_vertex is None:
        start_vertex = create_vertex(chrom, positions[0], locations, run_info)['location_ID']
    # Then get the start exon
    start_exon, start_novelty = match_or_create_edge(start_vertex,
                                                     vertex_IDs[0],
                                                     "exon", strand,
                                                     edge_dict)
    # If known_start == 1, the start vertex is a known startpoint of this gene.
    # start novelty refers to the novelty of the first exon (1 if yes, 0 if no)
    return start_vertex, start_exon, start_novelty, known_start, diff_5p
Example 12: process_3p
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def process_3p(chrom, positions, strand, vertex_IDs, gene_ID, gene_ends, edge_dict,
               locations, run_info):
    """ Conduct permissive match for 3' end and return assigned vertex,
        edge, and distance """
    # First get a permissively matched end vertex
    end_vertex, diff_3p, known_end = permissive_match_with_gene_priority(
        chrom, positions[-1], strand, positions[-2],
        "end", gene_ID, gene_ends,
        locations, run_info)
    if end_vertex is None:
        end_vertex = create_vertex(chrom, positions[-1], locations, run_info)['location_ID']
    # Then get the end exon
    end_exon, end_novelty = match_or_create_edge(vertex_IDs[-1],
                                                 end_vertex,
                                                 "exon", strand,
                                                 edge_dict)
    # If known_end == 1, the end vertex is a known endpoint of this gene.
    # end novelty refers to the novelty of the final exon (1 if yes, 0 if no)
    return end_vertex, end_exon, end_novelty, known_end, diff_3p
Example 13: find_gene_match_on_vertex_basis
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def find_gene_match_on_vertex_basis(vertex_IDs, strand, vertex_2_gene):
    """ Use vertices in a transcript to try to pinpoint the gene it belongs to.
    """
    gene_matches = []
    for vertex in vertex_IDs:
        if vertex in vertex_2_gene:
            curr_matches = vertex_2_gene[vertex]
            # Make sure the gene is on the correct strand
            gene_matches += [x[0] for x in curr_matches if x[1] == strand]
    if len(gene_matches) == 0:
        return None
    # Now count up how often we see each gene
    gene_tally = dict((x, gene_matches.count(x)) for x in set(gene_matches))
    # TODO: deal with fusions
    # For the main assignment, pick the gene that is observed the most
    gene_ID = max(gene_tally, key=gene_tally.get)
    return gene_ID
Example 14: consume
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def consume(queue):
    """Consumer client to simulate subscribing to a publisher.

    Args:
        queue (queue.Queue): Queue from which to consume messages.
    """
    while True:
        # wait for an item from the publisher
        msg = queue.get()
        # the publisher emits None to indicate that it is done
        if msg is None:
            break
        # process the msg
        logging.info(f"Consumed {msg}")
        # simulate i/o operation using sleep
        time.sleep(random.random())
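The consumer above stops when it receives a None sentinel from the publisher. A hedged sketch of the matching publisher side (the message contents and counts are illustrative, not from the original source):

import logging
import queue
import random
import threading
import time

def publish(q, n_messages=5):
    # Hypothetical publisher: emit a few messages, then the None sentinel.
    for i in range(n_messages):
        q.put(f"message #{i}")
        time.sleep(random.random())
    q.put(None)  # tell consume() it is done

logging.basicConfig(level=logging.INFO)
q = queue.Queue()
threading.Thread(target=publish, args=(q,)).start()
consume(q)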
Example 15: consumer
# Required module: import queue [as alias]
# Or: from queue import get [as alias]
def consumer(queue, stack, apix=1.0, iothreads=None):
    log = logging.getLogger('root')
    with mrc.ZSliceWriter(stack, psz=apix) as zwriter:
        while True:
            log.debug("Get")
            i, ri = queue.get(block=True)
            log.debug("Got %d, queue for %s is size %d" %
                      (i, stack, queue.qsize()))
            if i == -1:
                break
            new_image = ri.get()
            log.debug("Result for %d was shape (%d,%d)" %
                      (i, new_image.shape[0], new_image.shape[1]))
            zwriter.write(new_image)
            queue.task_done()
            log.debug("Wrote %d to %d@%s" % (i, zwriter.i, stack))
    if iothreads is not None:
        iothreads.release()
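Example 15 expects queue items of the form (index, async_result) and an index of -1 as the stop sentinel. A hedged sketch of the producing side follows; the thread pool, the per-item computation, and the output filename are assumptions rather than part of the original project, and writing the output still requires that project's mrc module.

import logging
import queue
import threading
from multiprocessing.pool import ThreadPool

import numpy as np

def render_slice(i):
    # Placeholder computation standing in for whatever produces each 2D image.
    return np.full((64, 64), i, dtype=np.float32)

logging.basicConfig(level=logging.DEBUG)
pool = ThreadPool(4)
result_queue = queue.Queue()

writer = threading.Thread(target=consumer,
                          args=(result_queue, "output.mrcs"),
                          kwargs={"apix": 1.0})
writer.start()

for i in range(10):
    result_queue.put((i, pool.apply_async(render_slice, (i,))))

result_queue.put((-1, None))  # sentinel: index -1 tells consumer() to stop
writer.join()
pool.close()
pool.join()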