This article collects typical usage examples of Python's six.moves.queue.get method. If you have been wondering what queue.get does, how to call it, and what real-world usage looks like, the curated code examples here should help. You can also explore further usage of the six.moves.queue module itself.
The sections below present 12 code examples of queue.get, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
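Before the project examples, here is a minimal sketch (not taken from any project below) of the three basic call forms of queue.get that they rely on: a blocking get, a get with a timeout, and a non-blocking get that raises queue.Empty.

# Minimal sketch of the basic six.moves.queue.get call forms (illustrative only).
from six.moves import queue

q = queue.Queue()
q.put("hello")

print(q.get())                  # blocking get, returns "hello"

try:
    q.get(True, 0.1)            # blocking get with a 0.1 second timeout
except queue.Empty:
    print("timed out: nothing arrived within 0.1 seconds")

try:
    q.get(False)                # non-blocking get
except queue.Empty:
    print("non-blocking get on an empty queue raises queue.Empty")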
Example 1: __iter__
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def __iter__(self):
    ds_itr = _repeat_iter(self.ds.get_data)
    with self._guard:
        while True:
            dps = []
            for k in range(self.nr_proc):
                dps.append(copy.copy(next(ds_itr)))
            to_map = [x[self.index] for x in dps]
            res = self._pool.map_async(_pool_map, to_map)
            for index in res.get():
                if index is None:
                    continue
                arr = np.reshape(self._shared_mem[index], self.output_shape)
                dp = dps[index]
                dp[self.index] = arr.copy()
                yield dp
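Example 1 calls get() on the AsyncResult returned by map_async rather than on a queue object. A minimal, self-contained sketch of that pattern, using a hypothetical _square worker instead of the project's _pool_map, might look like this.

# Hedged sketch of the map_async()/get() pattern used above (hypothetical worker).
import multiprocessing as mp

def _square(x):
    return x * x

if __name__ == "__main__":
    pool = mp.Pool(processes=2)
    res = pool.map_async(_square, [1, 2, 3, 4])
    print(res.get())            # blocks until every result is ready: [1, 4, 9, 16]
    pool.close()
    pool.join()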
Example 2: test_process
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def test_process():
    from multiprocessing import Queue
    from lib.config import cfg
    from lib.data_io import category_model_id_pair

    cfg.TRAIN.PAD_X = 10
    cfg.TRAIN.PAD_Y = 10

    data_queue = Queue(2)
    category_model_pair = category_model_id_pair(dataset_portion=[0, 0.1])

    data_process = ReconstructionDataProcess(data_queue, category_model_pair)
    data_process.start()
    batch_img, batch_voxel = data_queue.get()
    kill_processes(data_queue, [data_process])
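The test above blocks on data_queue.get() until a producer process has put a batch into the bounded queue. A hedged, standalone sketch of that producer/consumer pattern, independent of the lib.config and ReconstructionDataProcess specifics, is shown below.

# Hedged sketch: a producer process feeding a bounded queue, consumed with get().
from multiprocessing import Process, Queue

def _producer(q):
    for i in range(3):
        q.put(("batch", i))        # blocks if the bounded queue (maxsize=2) is full

if __name__ == "__main__":
    data_queue = Queue(2)
    proc = Process(target=_producer, args=(data_queue,))
    proc.start()
    for _ in range(3):
        print(data_queue.get())    # blocks until the producer puts an item
    proc.join()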
Example 3: _request_wrapper
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def _request_wrapper(self, queue, url, params, timeout):
    """
    Wrapper to requests used by each thread.

    Parameters
    ----------
    queue : Queue.Queue
        The Queue to write the response from the request in.
    url : str
        The URL to be queried.
    params : dict
        A dictionary of parameters to pass to the request.
    timeout : int
        Timeout to wait for a response to the request.
    """
    response = self.session.get(url, params=params, verify=self.verify, timeout=timeout)
    queue.put(response)
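A typical way to drive such a wrapper (not shown in the project code) is to start one thread per URL, let each thread put its result into a shared queue, and collect everything with get() once the threads have joined. The _fetch function below is a hypothetical stand-in for the HTTP call.

# Hedged sketch: threads push results into a queue, the caller drains it with get().
import threading
from six.moves import queue

def _fetch(result_queue, value):
    # Stand-in for the real request; a real wrapper would put the response object here.
    result_queue.put(value * 10)

result_queue = queue.Queue()
threads = [threading.Thread(target=_fetch, args=(result_queue, v)) for v in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

while not result_queue.empty():
    print(result_queue.get())   # safe here because all producer threads have finished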
Example 4: receive
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def receive(self, conn_name=(None, None), timeout=30):
    """
    Receive a FIX message from the given connection.

    The connection name defaults to ``(None, None)``. In this case,
    the server will try to find the one and only available connection.
    This will fail if there are more connections available or if the initial
    connection is no longer active.

    :param conn_name: Connection name to receive message from.
    :type conn_name: ``tuple`` of ``str`` and ``str``
    :param timeout: Timeout in seconds.
    :type timeout: ``int``

    :return: Fix message received.
    :rtype: ``FixMessage``
    """
    conn_name = self._validate_connection_name(conn_name)
    return self._conndetails_by_name[conn_name].queue.get(True, timeout)
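The final line is a blocking get with a timeout: queue.get(True, timeout) waits up to `timeout` seconds and raises queue.Empty if nothing arrives. A minimal sketch of that behaviour in isolation:

# Hedged sketch: blocking get with a timeout, as used by receive() above.
from six.moves import queue

inbound = queue.Queue()
try:
    msg = inbound.get(True, 2)  # block for up to 2 seconds
except queue.Empty:
    msg = None                  # nothing arrived before the timeout expired
print(msg)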
Example 5: __iter__
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def __iter__(self):
    ds_itr = _repeat_iter(self.ds.get_data)
    with self._guard:
        while True:
            dps = []
            for k in range(self.nr_proc):
                dps.append(copy.copy(next(ds_itr)))
            to_map = [x[self.index] for x in dps]
            res = self._pool.map_async(_pool_map, to_map)
            for index in res.get():
                if index is None:
                    continue
                arr = np.reshape(self._shared_mem[index], self.output_shape)
                dp = dps[index]
                dp[self.index] = arr.copy()
                yield dp
Example 6: _recv
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def _recv(self):
    return self._out_queue.get()
Example 7: __init__
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def __init__(self, ds, nr_proc, map_func, output_shape, output_dtype, index=0):
    """
    Args:
        ds (DataFlow): the dataflow to map on
        nr_proc (int): number of processes
        map_func (data component -> ndarray | None): the mapping function
        output_shape (tuple): the shape of the output of map_func
        output_dtype (np.dtype): the type of the output of map_func
        index (int): the index of the datapoint component to map on.
    """
    self.ds = ds
    self.nr_proc = nr_proc
    self.map_func = map_func
    self.output_shape = output_shape
    self.output_dtype = np.dtype(output_dtype).type
    self.index = index

    self._shared_mem = [self._create_shared_arr() for k in range(nr_proc)]
    id_queue = mp.Queue()
    for k in range(nr_proc):
        id_queue.put(k)

    def _init_pool(arrs, queue, map_func):
        id = queue.get()
        global SHARED_ARR, WORKER_ID, MAP_FUNC
        SHARED_ARR = arrs[id]
        WORKER_ID = id
        MAP_FUNC = map_func

    self._pool = mp.pool.Pool(
        processes=nr_proc,
        initializer=_init_pool,
        initargs=(self._shared_mem, id_queue, map_func))
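The id_queue here exists only to hand each pool worker a unique index: every worker's initializer performs exactly one queue.get(), so each shared array is claimed by exactly one process. A hedged, standalone sketch of that worker-ID handout pattern (the _who_am_i task is hypothetical):

# Hedged sketch: a queue of ids gives each pool worker a unique, stable index.
import multiprocessing as mp

WORKER_ID = None

def _init_worker(id_queue):
    global WORKER_ID
    WORKER_ID = id_queue.get()   # each worker consumes exactly one id

def _who_am_i(_):
    return WORKER_ID

if __name__ == "__main__":
    nr_proc = 3
    id_queue = mp.Queue()
    for k in range(nr_proc):
        id_queue.put(k)
    pool = mp.Pool(processes=nr_proc, initializer=_init_worker, initargs=(id_queue,))
    print(sorted(set(pool.map(_who_am_i, range(30)))))   # typically [0, 1, 2]
    pool.close()
    pool.join()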
Example 8: kill_processes
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def kill_processes(queue, processes):
    print('Signal processes')
    for p in processes:
        p.shutdown()

    print('Empty queue')
    while not queue.empty():
        time.sleep(0.5)
        queue.get(False)

    print('kill processes')
    for p in processes:
        p.terminate()
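kill_processes first signals the producers, then drains the queue with non-blocking get(False) so that no producer stays blocked on a full queue, and only then terminates them. Below is a hedged sketch of that stop/drain/terminate sequence with a hypothetical producer, using an Event in place of the project's shutdown() method.

# Hedged sketch: signal producers to stop, drain the queue, then terminate them.
import time
from multiprocessing import Event, Process, Queue
from six.moves import queue as _queue     # only for the Empty exception

def _producer(q, stop):
    while not stop.is_set():
        q.put("item")                      # blocks once the bounded queue is full

if __name__ == "__main__":
    q = Queue(2)
    stop = Event()
    p = Process(target=_producer, args=(q, stop))
    p.start()
    time.sleep(0.5)                        # let the producer fill the queue
    stop.set()                             # 1. ask the producer to stop (cf. p.shutdown())
    while not q.empty():                   # 2. drain so the producer is never stuck on put()
        try:
            q.get(False)
        except _queue.Empty:
            break
        time.sleep(0.1)
    p.terminate()                          # 3. force-stop anything still running
    p.join()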
Example 9: data_resources
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def data_resources(self):
    """
    List of all available data resources, meaning resources that return data.
    """
    if self._data_resources:
        return list(self._data_resources.keys())

    data_resources_url = six.moves.urllib.parse.urljoin(self.base_url, "data.json")
    response = self.session.get(data_resources_url, verify=self.verify)
    response_json = self._parse(response)
    for entry in response_json["queryResponse"]["entityType"]:
        self._data_resources[entry["$"]] = "%s.json" % entry["@url"]
    return list(self._data_resources.keys())
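The request URL above is composed with six.moves.urllib.parse.urljoin relative to the client's base_url. A small sketch of that composition, using a hypothetical base URL:

# Hedged sketch: how urljoin composes the resource URLs used above (hypothetical base URL).
from six.moves.urllib.parse import urljoin

base_url = "https://nms.example.com/webacs/api/v1/"
print(urljoin(base_url, "data.json"))              # .../api/v1/data.json
print(urljoin(base_url, "op/%s.json" % "devices")) # .../api/v1/op/devices.json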
Example 10: service_resources
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def service_resources(self):
    """
    List of all available service resources, meaning resources that modify the NMS.
    """
    if self._service_resources:
        return list(self._service_resources.keys())

    service_resources_url = six.moves.urllib.parse.urljoin(self.base_url, "op.json")
    response = self.session.get(service_resources_url, verify=self.verify)
    response_json = self._parse(response)
    for entry in response_json["queryResponse"]["operation"]:
        self._service_resources[entry["$"]] = {
            "method": entry["@httpMethod"],
            "url": six.moves.urllib.parse.urljoin(self.base_url, "op/%s.json" % entry["@path"]),
        }
    return list(self._service_resources.keys())
Example 11: _flush_queue
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def _flush_queue(self, msg_queue):
    """
    Flush the given receive queue.

    :param msg_queue: Queue to flush.
    :type msg_queue: ``queue.Queue``
    """
    try:
        while True:
            # Non-blocking get; loop until the Empty exception from the
            # ``queue`` module signals that the queue has been drained.
            msg_queue.get(False)
    except queue.Empty:
        return
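This is the standard drain loop: call get(False) repeatedly and let the Empty exception signal that the queue has been exhausted. A minimal standalone sketch:

# Hedged sketch: drain a queue with non-blocking get() until Empty is raised.
from six.moves import queue

q = queue.Queue()
for i in range(3):
    q.put(i)

try:
    while True:
        print(q.get(False))   # prints 0, 1, 2, then queue.Empty ends the loop
except queue.Empty:
    pass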
Example 12: __init__
# Required import: from six.moves import queue [as alias]
# Or: from six.moves.queue import get [as alias]
def __init__(self, ds, nr_proc, map_func, output_shape, output_dtype, index=0):
    """
    Args:
        ds (DataFlow): the dataflow to map on
        nr_proc (int): number of processes
        map_func (data component -> ndarray | None): the mapping function
        output_shape (tuple): the shape of the output of map_func
        output_dtype (np.dtype): the type of the output of map_func
        index (int): the index of the datapoint component to map on.
    """
    self.ds = ds
    self.nr_proc = nr_proc
    self.map_func = map_func
    self.output_shape = output_shape
    self.output_dtype = np.dtype(output_dtype).type
    self.index = index

    self._shared_mem = [self._create_shared_arr() for k in range(nr_proc)]
    id_queue = mp.Queue()
    for k in range(nr_proc):
        id_queue.put(k)

    def _init_pool(arrs, queue, map_func):
        id = queue.get()
        global SHARED_ARR, WORKER_ID, MAP_FUNC
        SHARED_ARR = arrs[id]
        WORKER_ID = id
        MAP_FUNC = map_func

    self._pool = mp.pool.Pool(
        processes=nr_proc,
        initializer=_init_pool,
        initargs=(self._shared_mem, id_queue, map_func))
    self._guard = DataFlowReentrantGuard()