This article collects typical usage examples of queue.deque in Python. If you have been wondering what exactly queue.deque is, how to call it, or what it looks like in real code, the curated examples below may help. You can also look further into the queue module in which it lives.
The following shows 14 code examples of queue.deque, sorted by popularity by default.
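A note before the examples: queue.deque is not a documented attribute of the queue module. In CPython, queue simply does from collections import deque for its internal storage, so queue.deque refers to the same class as collections.deque, and importing it from collections is the conventional spelling. Below is a minimal, self-contained sketch of the deque behaviour the examples rely on (bounded buffers via maxlen, FIFO consumption via popleft):

# Minimal deque sketch; collections.deque is the documented import path,
# and queue.deque resolves to the same class in CPython.
from collections import deque

ring = deque(maxlen=3)      # bounded: the oldest item is dropped when full
for i in range(5):
    ring.append(i)
print(list(ring))           # [2, 3, 4]

fifo = deque()              # unbounded double-ended queue
fifo.append('a')            # enqueue on the right
fifo.append('b')
print(fifo.popleft())       # 'a' -- FIFO consumption from the left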
Example 1: fill_queue
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def fill_queue(self):
    # Lazily create the result buffer and, if requested, the worker pool,
    # then keep generating games until the buffer is full.
    if self.results is None:
        self.results = queue.deque(maxlen=self.max_queue)
    if self.num_workers > 0:
        if self.pool is None:
            self.pool = Pool(processes=self.num_workers)
    while len(self.results) < self.max_queue:
        if self.distinct_levels is not None and self.idx >= self.distinct_levels:
            break
        elif not self.repeat_levels and self.idx >= len(self.file_data):
            break
        else:
            data = self.get_next_parameters()
            if data is None:
                break
            self.idx += 1
            kwargs = {'seed': self._seed.spawn(1)[0]}
            if self.num_workers > 0:
                result = self.pool.apply_async(_game_from_data, data, kwargs)
            else:
                result = _game_from_data(*data, **kwargs)
            self.results.append((data, result))
Example 2: __init__
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def __init__(self, name, topic: (str, list) = None, *args, **kwargs):
    self.name = name
    self.topic = set(topic if isinstance(topic, list) else
                     [topic]) if topic is not None else set()
    self.ctx = zmq_ctx
    self.sub_socket = self.ctx.socket(zmq.SUB)
    self.sub_socket.setsockopt(zmq.RCVTIMEO, 3000)
    self._thread_pool = ThreadPoolExecutor(max_workers=1)
    self.inproc = set()
    if self.topic:  # if topic is left at its default of None, every topic is handled
        for t in self.topic:
            self.sub_socket.setsockopt(zmq.SUBSCRIBE, pickle.dumps(t))
    else:
        self.sub_socket.subscribe('')
    if kwargs.get('latest', False):  # pass latest=True to subscribe to only the newest data
        self.data_queue = deque(maxlen=1)
        self.latest = True
    else:
        self.data_queue = deque()
        self.latest = False
    self.__active = False
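The latest branch above is a small but handy pattern: a deque with maxlen=1 silently discards the previous entry on every append, so the consumer always sees only the most recent message. A standalone sketch of that pattern (the incoming values here are made up for illustration):

from collections import deque

latest_only = deque(maxlen=1)
for price in (100.0, 100.5, 99.8):   # hypothetical stream of incoming messages
    latest_only.append(price)        # each append drops the older entry
print(latest_only[0])                # 99.8 -- only the newest message remains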
Example 3: reset
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def reset(self):
    obs = self.env.reset()
    self._prior_positions = queue.deque(
        [self.game.agent_loc], self.movement_bonus_period)
    return obs
Example 4: __getstate__
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def __getstate__(self):
    state = self.__dict__.copy()
    if self.num_workers > 0:
        # Don't pickle the multiprocessing pool, and wait on all queued results.
        state['pool'] = None
        state['results'] = queue.deque([
            r.get() if isinstance(r, ApplyResult) else r
            for r in self.results
        ], maxlen=self.max_queue)
    return state
Example 5: profile
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def profile(func):
    name = func.__name__
    samples = queue.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.debug_enabled:
            return func(self, *args, **kwargs)
        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)
        if len(samples) < 2:
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0
            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33
        samples.append(n)
        self.info("\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)", name, n, m, d)
        return ret
    return wrapper
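The decorator above depends on project-specific helpers (clock, tfloat, mean, stdev, self.info) that are not shown in the snippet. A simplified, self-contained variant of the same idea, keeping the last few call durations in a bounded deque and flagging calls that run slower than the rolling mean, might look like this (the names and output format are illustrative, not the original project's):

import functools
import time
from collections import deque
from statistics import mean

def profile(func):
    samples = deque(maxlen=5)                 # rolling window of recent durations

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        ret = func(*args, **kwargs)
        elapsed = time.perf_counter() - start
        avg = mean(samples) if len(samples) >= 2 else None
        slow = avg is not None and elapsed > avg
        samples.append(elapsed)
        print('%s %s: t=%.6fs mean=%s' % ('slow' if slow else 'ok',
                                          func.__name__, elapsed,
                                          '%.6fs' % avg if avg is not None else 'n/a'))
        return ret
    return wrapper

@profile
def busy():
    return sum(range(100_000))

for _ in range(3):
    busy()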
Example 6: __init__
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def __init__(self, app):
    """
    :type app: feeluown.app.App
    """
    self._app = app
    # store songs that are going to be added to playlist
    self._queue = deque()
    self._activated = False
    self._is_fetching_songs = False
    self._fetch_songs_task_name = 'fm-fetch-songs'
    self._fetch_songs_func = None
    self._minimum_per_fetch = 3
    self._app.playlist.mode_changed.connect(self._on_playlist_mode_changed)
Example 7: profile
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def profile(func):
    name = func.__name__
    samples = queue.deque(maxlen=5)

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.is_debug_enabled:
            return func(self, *args, **kwargs)
        start = clock()
        ret = func(self, *args, **kwargs)
        n = tfloat(clock() - start)
        if len(samples) < 2:
            m = 0
            d = 0
            n.color = 36
        else:
            m = mean(samples)
            if stdev:
                d = tfloat(stdev(samples))
            else:
                d = 0
            if n <= m + d:
                n.color = 32
            elif n > m + d * 2:
                n.color = 31
            else:
                n.color = 33
        samples.append(n)
        self.info('\x1b[34m%s\x1b[m t = %s, \u00b5 = %s, \u03c3 = %s)',
                  name, n, m, d)
        return ret
    return wrapper
Example 8: __init__
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def __init__(self):
    self.values = {}
    self.lock = threading.Lock()
    self.queue = queue.deque()
    self.index = 0
Example 9: min_subnet_depth
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def min_subnet_depth(topology):
    """Find the minimum depth of each subnet in the network graph in terms of steps
    from an exposed subnet to each subnet

    Parameters
    ----------
    topology : 2D matrix
        An adjacency matrix representing the network, with the first subnet
        representing the internet (i.e. exposed)

    Returns
    -------
    depths : list
        depth of each subnet ordered by subnet index in topology
    """
    num_subnets = len(topology)
    assert len(topology[0]) == num_subnets
    depths = []
    Q = deque()
    for subnet in range(num_subnets):
        if topology[subnet][INTERNET] == 1:
            depths.append(0)
            Q.appendleft(subnet)
        else:
            depths.append(float('inf'))
    while len(Q) > 0:
        parent = Q.pop()
        for child in range(num_subnets):
            if topology[parent][child] == 1:
                # child is connected to parent
                if depths[child] > depths[parent] + 1:
                    depths[child] = depths[parent] + 1
                    Q.appendleft(child)
    return depths
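This is a plain breadth-first search that uses the deque as a FIFO queue (appendleft to enqueue, pop to dequeue). A small hedged usage example, assuming the exposed column index INTERNET is 0 (that constant is not defined in the snippet above):

INTERNET = 0  # assumed index of the internet/exposed column

# subnet 0 is the internet, subnet 1 is exposed to it,
# subnet 2 is only reachable through subnet 1
topology = [
    [1, 1, 0],
    [1, 1, 1],
    [0, 1, 1],
]
print(min_subnet_depth(topology))   # expected: [0, 0, 1]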
Example 10: statistic
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def statistic(self):
    now = time.time()
    if now > self.last_statistic_time + 60:
        rtt = 0
        sent = 0
        received = 0
        for stat in self.second_stats:
            rtt = max(rtt, stat["rtt"])
            sent += stat["sent"]
            received += stat["received"]
        self.minute_stat = {
            "rtt": rtt,
            "sent": sent,
            "received": received
        }
        self.second_stats = queue.deque()
        self.last_statistic_time = now

    if len(self.rtts):
        rtt = max(self.rtts)
    else:
        rtt = 0
    self.second_stat = {
        "rtt": rtt,
        "sent": self.total_sent - self.last_sent,
        "received": self.total_received - self.last_received
    }
    self.rtts = []
    self.last_sent = self.total_sent
    self.last_received = self.total_received
    self.second_stats.append(self.second_stat)
Example 11: numpy_mode
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def numpy_mode():
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            netA_path = input('enter netA path...')
            netB_path = input('enter netB path...')
            wave_path = input('enter wave path...')
            ds = dataset.WaveDataset(wave_path, -1, True)
            netA = Generator()
            netB = Generator()
            chainer.serializers.load_npz(netA_path, netA)
            chainer.serializers.load_npz(netB_path, netB)
            que_a = queue.deque()
            que_ab = queue.deque()
            que_aba = queue.deque()
            gla = GLA()
            print('converting...')
            for i in tqdm.tqdm(range(ds.max//dataset.dif)):
                x_a = ds.get_example(i)
                x_a = chainer.dataset.convert.concat_examples([x_a], -1)
                x_a = chainer.Variable(x_a)
                x_ab = netA(x_a)
                x_aba = netB(x_ab)
                que_a.append(x_a.data[0])
                que_ab.append(x_ab.data[0])
                que_aba.append(x_aba.data[0])
            print('done')
            print('phase estimating...')
            for i, que, name in zip(range(3), [que_a, que_ab, que_aba], ['a.wav', 'ab.wav', 'aba.wav']):
                print()
                print(i+1, '/ 3')
                wave = np.concatenate([gla.inverse(c_f) for i_f in tqdm.tqdm(que) for c_f in dataset.reverse(i_f)])
                print('done...')
                dataset.save(wave_path + name, 16000, wave)
                print('wave-file saved at', wave_path + name)
    print('all done')
Example 12: cupy_mode
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def cupy_mode(gpu):
    from gla.gla_gpu import GLA_GPU
    cp = chainer.cuda.cupy
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            netA_path = input('enter netA path...')
            netB_path = input('enter netB path...')
            wave_path = input('enter wave path...')
            batchsize = int(input('enter batch size...'))
            chainer.cuda.get_device_from_id(gpu).use()
            ds = dataset.WaveDataset(wave_path, -1, True)
            netA = Generator()
            netB = Generator()
            chainer.serializers.load_npz(netA_path, netA)
            chainer.serializers.load_npz(netB_path, netB)
            netA.to_gpu()
            netB.to_gpu()
            que_a = queue.deque()
            que_ab = queue.deque()
            que_aba = queue.deque()
            gla = GLA_GPU(batchsize*4)
            print('converting...')
            l = ds.max//dataset.dif
            for i in tqdm.tqdm(range(0, l, batchsize)):
                x_a = [ds.get_example(_i) for _i in range(i, min([i+batchsize, l]))]
                x_a = chainer.dataset.convert.concat_examples(x_a, gpu)
                x_a = chainer.Variable(x_a)
                x_ab = netA(x_a)
                x_aba = netB(x_ab)
                que_a.extend([dataset.reverse(_x) for _x in cp.asnumpy(x_a.data)])
                que_ab.extend([dataset.reverse(_x) for _x in cp.asnumpy(x_ab.data)])
                que_aba.extend([dataset.reverse(_x) for _x in cp.asnumpy(x_aba.data)])
            img_a = np.concatenate(que_a, axis=0)
            img_ab = np.concatenate(que_ab, axis=0)
            img_aba = np.concatenate(que_aba, axis=0)
            print('done')
            print('phase estimating...')
            for i, img, name in zip(range(3), [img_a, img_ab, img_aba], ['a.wav', 'ab.wav', 'aba.wav']):
                print()
                print(i+1, '/ 3')
                wave = gla.auto_inverse(img)
                print('done...')
                dataset.save(wave_path + name, 16000, wave)
                print('wave-file saved at', wave_path + name)
    print('all done')
Example 13: _download_multithreaded
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def _download_multithreaded(self, input_items, num_threads, trace_custom_fields={}, traces_copy_out=None):
    """
    Starts an appropriate number of threads to download items from the input list.
    (This function is meant to be used as class internal only)

    :param input_items: list containing the input items to download
    :param num_threads: suggestion of how many threads should be started
    :param trace_custom_fields: Custom key value pairs to send with the traces
    :param traces_copy_out: reference to an external list, where the traces should be uploaded

    :returns: list with output items as dictionaries
    """
    logger = self.logger

    num_files = len(input_items)
    nlimit = 5
    num_threads = max(1, num_threads)
    num_threads = min(num_files, num_threads, nlimit)

    input_queue = Queue()
    output_queue = Queue()
    input_queue.queue = deque(input_items)

    if num_threads < 2:
        logger.info('Using main thread to download %d file(s)' % num_files)
        self._download_worker(input_queue, output_queue, trace_custom_fields, traces_copy_out, '')
        return list(output_queue.queue)

    logger.info('Using %d threads to download %d files' % (num_threads, num_files))
    threads = []
    for thread_num in range(0, num_threads):
        log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads)
        kwargs = {'input_queue': input_queue,
                  'output_queue': output_queue,
                  'trace_custom_fields': trace_custom_fields,
                  'traces_copy_out': traces_copy_out,
                  'log_prefix': log_prefix}
        try:
            thread = Thread(target=self._download_worker, kwargs=kwargs)
            thread.start()
            threads.append(thread)
        except Exception as error:
            logger.warning('Failed to start thread %d' % thread_num)
            logger.debug(error)

    try:
        logger.debug('Waiting for threads to finish')
        for thread in threads:
            thread.join()
    except KeyboardInterrupt:
        logger.warning('You pressed Ctrl+C! Exiting gracefully')
        for thread in threads:
            thread.kill_received = True

    return list(output_queue.queue)
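One detail worth noting above is input_queue.queue = deque(input_items): queue.Queue keeps its items in a plain deque stored in its queue attribute, so assigning a pre-filled deque is a quick way to bulk-load a thread-safe queue before any workers start. It relies on an implementation detail rather than the public put() API, so treat the following standalone sketch as illustrative:

from collections import deque
from queue import Queue, Empty

work = Queue()
work.queue = deque(['item_a', 'item_b', 'item_c'])   # bulk-load before workers start

while True:
    try:
        item = work.get_nowait()     # worker threads would drain this concurrently
    except Empty:
        break
    print('processing', item)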
Example 14: __init__
# Required import: import queue [as alias]
# Or: from queue import deque [as alias]
def __init__(self, logger, config, ip_manager, connection_manager,
             http1worker=Http1Worker,
             http2worker=Http2Worker):
    self.logger = logger
    self.config = config
    self.ip_manager = ip_manager
    self.connection_manager = connection_manager
    self.connection_manager.set_ssl_created_cb(self.on_ssl_created_cb)
    self.http1worker = http1worker
    self.http2worker = http2worker

    self.request_queue = queue.Queue()
    self.workers = []
    self.working_tasks = {}
    self.h1_num = 0
    self.h2_num = 0
    self.last_request_time = time.time()
    self.task_count_lock = threading.Lock()
    self.task_count = 0
    self.running = True

    # for statistic
    self.success_num = 0
    self.fail_num = 0
    self.continue_fail_num = 0
    self.last_fail_time = 0

    self.rtts = []
    self.last_sent = self.total_sent = 0
    self.last_received = self.total_received = 0
    self.second_stats = queue.deque()
    self.last_statistic_time = time.time()
    self.second_stat = {
        "rtt": 0,
        "sent": 0,
        "received": 0
    }
    self.minute_stat = {
        "rtt": 0,
        "sent": 0,
        "received": 0
    }

    self.trigger_create_worker_cv = SimpleCondition()
    self.wait_a_worker_cv = simple_queue.Queue()

    threading.Thread(target=self.dispatcher).start()
    threading.Thread(target=self.create_worker_thread).start()
    threading.Thread(target=self.connection_checker).start()