This article collects typical usage examples of the Queue.put method from Python's multiprocessing.queues module. If you are unsure what Queue.put does or how to use it, the curated examples below should help. You can also explore further usage examples of the containing class, multiprocessing.queues.Queue.
The following presents 15 code examples of Queue.put, sorted by popularity by default.
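Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: subclassing multiprocessing.queues.Queue and extending put(). It targets Python 3, where the constructor requires a context; the CountingQueue name and the counter attribute are illustrative, not taken from any example below.

import multiprocessing
from multiprocessing.queues import Queue

class CountingQueue(Queue):
    """Queue subclass that counts successful put() calls."""

    def __init__(self, *args, **kwargs):
        ctx = multiprocessing.get_context()
        super().__init__(*args, ctx=ctx, **kwargs)
        # A shared Value, so the count is meaningful across processes.
        # Caveat: Queue.__getstate__ does not forward extra attributes,
        # so child processes need the counter passed to them explicitly.
        self._put_counter = ctx.Value('i', 0)

    def put(self, obj, block=True, timeout=None):
        # If the underlying put() raises, the counter stays untouched.
        super().put(obj, block, timeout)
        with self._put_counter.get_lock():
            self._put_counter.value += 1

if __name__ == '__main__':
    q = CountingQueue()
    q.put('hello')
    print(q._put_counter.value)  # -> 1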
Example 1: WorkerThreads
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
class WorkerThreads(object):
    def __init__(self, threads=1):
        """
        Initialize the thread pool and queues.
        """
        self.pools = ThreadPool(processes=threads)
        self.updater_queue = Queue()

    def get_updater_queue(self):
        return self.updater_queue

    def updater(self, ident, state, meta):
        """
        Updater function: this just posts a status message to the queue.
        """
        self.updater_queue.put({'id': ident, 'state': state, 'meta': meta})

    def pull(self, request, updater, testmode=0):
        try:
            pull(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def expire(self, request, updater):
        try:
            remove_image(request, updater)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def wrkimport(self, request, updater, testmode=0):
        try:
            img_import(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def dopull(self, ident, request, testmode=0):
        """
        Kick off a pull operation.
        """
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.pull, [request, updater],
                               {'testmode': testmode})

    def doexpire(self, ident, request, testmode=0):
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.expire, [request, updater])

    def dowrkimport(self, ident, request, testmode=0):
        logging.debug("wrkimport starting")
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.wrkimport, [request, updater],
                               {'testmode': testmode})
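The pattern above, pool workers reporting progress by putting dicts on a shared queue, reduces to a few lines. A minimal, runnable sketch (the `work` function and the message shape are illustrative; a plain `queue.Queue` suffices here because `ThreadPool` workers are threads, not processes):

from multiprocessing.pool import ThreadPool
from queue import Queue  # thread-safe is enough for a ThreadPool

def work(ident, status_queue):
    # Report completion the same way WorkerThreads.updater does.
    status_queue.put({'id': ident, 'state': 'READY', 'meta': None})

status_queue = Queue()
pool = ThreadPool(processes=2)
for i in range(3):
    pool.apply_async(work, [i, status_queue])
pool.close()
pool.join()
while not status_queue.empty():
    print(status_queue.get())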
Example 2: put
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put(self, obj, block=True, timeout=None):
    Queue.put(self, obj, block, timeout)
    self._put_counter.value += 1

    if self.qsize() != 0:
        self.cond_notempty.acquire()
        try:
            self.cond_notempty.notify_all()
        finally:
            self.cond_notempty.release()
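This excerpt relies on `_put_counter` and `cond_notempty` attributes created elsewhere in its class. A plausible setup (assumed, not from the original project) uses shared primitives so that both the counter and the condition work across processes:

import multiprocessing
from multiprocessing.queues import Queue

class NotifyingQueue(Queue):  # class name is illustrative
    def __init__(self, maxsize=0):
        ctx = multiprocessing.get_context()
        super().__init__(maxsize, ctx=ctx)
        self._put_counter = ctx.Value('i', 0)  # shared count of put() calls
        self.cond_notempty = ctx.Condition()   # signalled when items arrive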
Example 3: testDodgyActor
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def testDodgyActor(self):
    queue = Queue()
    yield self.spawn(actor_class=DodgyActor, max_requests=1,
                     ioqueue=queue, on_event=on_event)
    proxy = pulsar.get_actor().get_actor(self.a.aid)
    self.assertEqual(proxy.name, 'dodgyactor')
    queue.put(('request', 'Hello'))
    c = 0
    while c < 20:
        if not proxy.is_alive():
            break
        else:
            c += 1
            yield pulsar.NOT_DONE
    self.assertFalse(proxy.is_alive())
Example 4: start_work
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def start_work(self, worker, work, num_jobs, *args, **kwargs):
    '''work should be an indexable sequence'''
    wlen = len(work)
    if not wlen: return
    if self._counter is not None: self._counter.set_work(wlen)
    # determine the number of jobs to start
    if not num_jobs: num_jobs = cpu_count
    # prepare jobs
    in_queue = Queue(wlen + num_jobs)
    self._jobs = [None] * num_jobs
    for j in xrange(num_jobs):
        queue = Queue()
        job = UProcess(target=worker, args=(queue, self._abort_event,
                                            in_queue, work) + args, kwargs=kwargs)
        job.daemon = self._daemonic
        self._jobs[j] = Job(job, queue)
    self.start_jobs()
    # enqueue every work index, then one None sentinel per job
    for i in xrange(wlen): in_queue.put(i, False)
    for j in xrange(num_jobs): in_queue.put(None, False)
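For context, the matching `worker` function (a sketch under assumptions; the real one lives elsewhere in that project) would pull indices off `in_queue` until it meets its `None` sentinel:

def worker(out_queue, abort_event, in_queue, work):
    while not abort_event.is_set():
        i = in_queue.get()
        if i is None:  # one sentinel per job shuts the loop down
            break
        out_queue.put(compute(work[i]))  # `compute` is hypothetical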
Example 5: put
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put(self, element):
    '''
    Put the element in the queue.
    Raises an exception if too many errors are encountered.
    '''
    dt = 1e-3
    while dt < 1:
        try:
            Queue.put(self, element)
            return
        except IOError:
            logger.warning('IOError encountered in SafeQueue put()')
            try:
                time.sleep(dt)
            except: pass
            dt *= 2
    e = IOError('Unrecoverable error')
    raise e
Example 6: test_monitor_thread
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def test_monitor_thread():
    """Test the monitor thread rerouting news to the engine signal.
    """
    from atom.api import Value

    class E(BaseEngine):
        test = Value()

        def _observe_progress(self, val):
            self.test = val

    q = Queue()
    e = E()
    m = ThreadMeasureMonitor(e, q)
    m.start()
    q.put('test')
    q.put(('', ''))
    q.put((None, None))  # (None, None) is the monitor's stop sentinel
    m.join()
    assert e.test == 'test'
Example 7: ActionGeneratorDataFormer
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
#......... part of the code omitted .........
class AuthorAdder(Process):
    def __init__(self, queue, outer):
        super(ActionGeneratorDataFormer.AuthorAdder, self).__init__(name="author_adder")
        self.q = queue
        self.outer = outer

    def run(self):
        log.info("author adder started")
        while 1:
            try:
                author = self.q.get()
                r_author = self.outer._get_author_object(author)
                c_karma = r_author.__dict__.get("comment_karma", 0)
                l_karma = r_author.__dict__.get("link_karma", 0)
                if c_karma > AE_MIN_COMMENT_KARMA and l_karma > AE_MIN_LINK_KARMA:
                    log.info("will add [%s] for action engine" % (author))
                    self.outer._add_author_data(r_author)
            except Exception as e:
                log.exception(e)

def __init__(self):
    self._storage = AuthorsStorage("author_generator_data_former")
    self._r = praw.Reddit(user_agent=choice(USER_AGENTS))
    self._queue = Queue()
    adder = ActionGeneratorDataFormer.AuthorAdder(self._queue, self)
    adder.start()

def is_author_added(self, author):
    found = self._storage.steps.find_one({"author": author})
    return found is not None

def save_action(self, author, action_type, time, end_time=None):
    q = {"author": author, "action_type": action_type}
    if isinstance(time, datetime):
        q["time"] = time_hash(time)
    elif isinstance(time, int):
        q["time"] = time
    if end_time:
        q["end_time"] = end_time
    found = self._storage.steps.find_one(q)
    if found:
        self._storage.steps.update_one(q, {"$inc": {"count": 1}})
    else:
        q["count"] = 1
        self._storage.steps.insert_one(q)

def revert_sleep_actions(self, group_id=None):
    q = {'end_time': {'$exists': True}}
    if group_id:
        q["used"] = group_id
    self._storage.steps.delete_many(q)

def fill_consume_and_sleep(self, authors_min_actions_count=AE_AUTHOR_MIN_ACTIONS, min_sleep=AE_MIN_SLEEP_TIME,
                           max_sleep=AE_MAX_SLEEP_TIME):
    for author in self._storage.get_interested_authors(authors_min_actions_count):
        start_time, end_time = 0, 0
        actions = self._storage.steps.find({"author": author}).sort("time", 1)
        for i, action in enumerate(actions):
            if i == 0:
                start_time = action.get("time")
                continue
            end_time = action.get("time")
            delta = (end_time - start_time)
            if delta > min_sleep and delta < max_sleep:
                self.save_action(author, A_SLEEP, start_time, end_time)
            start_time = end_time
        log.info("Updated consume and sleep steps for %s" % author)

def _get_author_object(self, author_name):
    r_author = self._r.get_redditor(author_name, fetch=True)
    return r_author

def _get_data_of(self, r_author):
    try:
        cb = list(r_author.get_comments(sort="new", limit=1000))
        sb = list(r_author.get_submitted(sort="new", limit=1000))
        return cb, sb
    except Exception as e:
        log.exception(e)
        return [], []

def _add_author_data(self, r_author):
    log.info("will retrieve comments of %s" % r_author.name)
    _comments, _posts = self._get_data_of(r_author)
    for comment in _comments:
        self.save_action(r_author.name, A_COMMENT, datetime.fromtimestamp(comment.created_utc))
    for submission in _posts:
        self.save_action(r_author.name, A_POST, datetime.fromtimestamp(submission.created_utc))
    log.info("filled %s comments and %s posts" % (len(_comments), len(_posts)))

def add_author_data(self, author):
    if not self.is_author_added(author):
        self._queue.put(author)
Example 8: put
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put(self, obj, block=True, timeout=None):
    Queue.put(self, (self._id, obj), block=block, timeout=timeout)
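Tagging every item with the sending queue's id lets a single consumer distinguish multiple producers. A sketch of the receiving end (assumed; not part of the original project, and the `None` shutdown sentinel is a convention chosen here):

def drain(shared_queue, handlers):
    # handlers maps a queue id to a callback for that producer's items
    while True:
        sender_id, obj = shared_queue.get()
        if obj is None:  # assumed shutdown sentinel
            break
        handlers[sender_id](obj)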
Example 9: put_back
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put_back(self, obj, block=True, timeout=None):
    """Used when a task is put back to be processed more"""
    # Unlike put() in the next example, this does not increment the
    # pending-task counter: the task was already counted on first put().
    Queue.put(self, obj, block, timeout)
Example 10: put
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put(self, obj, block=True, timeout=None):
    """Used when a task is put for the first time"""
    Queue.put(self, obj, block, timeout)
    self._tasks_lock.acquire()
    self._tasks.value += 1
    self._tasks_lock.release()
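A class like this would plausibly pair put() with a task_done() that decrements the shared counter once a task completes. An assumed sketch, not from the original source:

def task_done(self):
    self._tasks_lock.acquire()
    try:
        self._tasks.value -= 1
    finally:
        self._tasks_lock.release()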
Example 11: put
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
def put(self, *args, **kwargs):
    # If the put fails, the exception will prevent us from incrementing the counter
    Queue.put(self, *args, **kwargs)
    with self._lock:
        self._counter.value += 1
Example 12: ProcessEngine
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
class ProcessEngine(BaseEngine):
    """An engine executing the measurement it is sent in a different process.
    """
    # --- Public API ----------------------------------------------------------

    #: Reference to the workbench got at __init__
    workbench = Typed(Workbench)

    def prepare_to_run(self, name, root, monitored_entries, build_deps):
        runtime_deps = root.run_time

        # Get the ConfigObj describing the measure.
        root.update_preferences_from_members()
        config = root.task_preferences

        # Make an infos tuple to send to the subprocess.
        self._temp = (name, config, build_deps, runtime_deps,
                      monitored_entries)

        # Clear all the flags.
        self._meas_pause.clear()
        self._meas_paused.clear()
        self._meas_stop.clear()
        self._stop.clear()
        self._force_stop.clear()
        self._stop_requested = False

        # If the process does not exist or is dead, create a new one.
        if not self._process or not self._process.is_alive():
            self._pipe, process_pipe = Pipe()
            self._process = TaskProcess(process_pipe,
                                        self._log_queue,
                                        self._monitor_queue,
                                        self._meas_pause,
                                        self._meas_paused,
                                        self._meas_stop,
                                        self._stop)
            self._process.daemon = True

            self._log_thread = QueueLoggerThread(self._log_queue)
            self._log_thread.daemon = True

            self._monitor_thread = ThreadMeasureMonitor(self,
                                                        self._monitor_queue)
            self._monitor_thread.daemon = True
            self._pause_thread = None

        self.measure_status = ('PREPARED', 'Engine ready to process.')

    def run(self):
        if not self._process.is_alive():
            # Start the monitoring threads.
            self._log_thread.start()
            self._monitor_thread.start()

            # Start the process.
            self._process.start()
            self.active = True

            # Start the main communication thread.
            self._com_thread = Thread(group=None,
                                      target=self._process_listener)
            self._com_thread.start()

        self._starting_allowed.set()
        self.measure_status = ('RUNNING', 'Measure running.')

    def pause(self):
        self.measure_status = ('PAUSING', 'Waiting for measure to pause.')
        self._meas_pause.set()
        self._pause_thread = Thread(target=self._wait_for_pause)
        self._pause_thread.start()

    def resume(self):
        self._meas_pause.clear()
        self.measure_status = ('RUNNING', 'Measure has been resumed.')

    def stop(self):
        self._stop_requested = True
        self._meas_stop.set()

    def exit(self):
        self._stop_requested = True
        self._meas_stop.set()
        self._stop.set()
        # Everything else is handled by the _com_thread and the process.

    def force_stop(self):
        self._stop_requested = True
        # Just in case the user calls this directly. Will signal all threads
        # to stop (except _com_thread).
        self._stop.set()
        self._log_queue.put(None)
        self._monitor_queue.put((None, None))
#......... some methods omitted .........
Example 13: __init__
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
class YubiGuard:
    def __init__(self, scrlck_mode=False):
        self.scrlck_mode = scrlck_mode

        self.id_q = Queue()
        self.on_q = Queue()
        self.pi_q = Queue()

        # init processes
        gi_proc = Process(target=self.get_ids)
        gi_proc.daemon = True

        cs_proc = Process(target=self.change_state)
        # no daemon, or the main program will terminate before keys can be unlocked
        cs_proc.daemon = False

        zmq_lis = ZmqListener(
            self.on_q)  # somehow works only with threads, not processes
        zmq_lis_thr = Thread(target=zmq_lis.start_listener)
        zmq_lis_thr.setDaemon(True)

        pi = PanelIndicator(self.pi_q, self.on_q)

        # start processes and catch exceptions:
        try:
            gi_proc.start()
            cs_proc.start()
            zmq_lis_thr.start()

            pi.run_pi()  # main loop of the root process
        except (KeyboardInterrupt, SystemExit):
            print('Caught exit event.')
        finally:
            # send exit signal, which will reactivate the YubiKey slots
            print('Sending EXIT_SIGNAL')
            self.on_q.put(EXIT_SIGNAL)

    def get_ids(self):
        old_id_l = []
        no_key = True
        pat = re.compile(r"(?:Yubikey.*?id=)(\d+)", re.IGNORECASE)

        while True:
            new_id_l = []
            # get the list of xinput device ids and extract those of YubiKeys:
            xinput = shell_this('xinput list')
            matches = re.findall(pat, xinput)
            new_id_l.extend(matches)
            new_id_l.sort()

            if not new_id_l and not no_key:
                self.pi_q.put(NOKEY_SIGNAL)
                print('No YubiKey(s) detected.')
                no_key = True
            elif new_id_l and no_key:
                self.pi_q.put(OFF_SIGNAL)
                print('YubiKey(s) detected.')
                no_key = False
                # notify:
                msg_cmd = """notify-send --expire-time=2000 \
'YubiKey(s) detected.'"""
                shell_this(msg_cmd)

            if new_id_l != old_id_l:
                print('Change in YubiKey ids detected. From {} to {}.'.format(
                    old_id_l, new_id_l))
                self.id_q.put(new_id_l)

            # lock the screen if in screenlock mode and a YubiKey is removed:
            if self.scrlck_mode and len(new_id_l) < len(old_id_l):
                print('Locking screen.')
                shell_this(get_scrlck_cmd())  # execute screen lock command

            old_id_l = new_id_l
            time.sleep(.1)

    def turn_keys(self, id_l, lock=True):
        # problem of value loss of cs_id_l found in this function
        tk_id_l = id_l
        if lock:
            print('Locking YubiKey(s).')
            state_flag = '0'
            self.pi_q.put(OFF_SIGNAL)
        else:
            print('Unlocking YubiKey(s).')
            state_flag = '1'
            self.pi_q.put(ON_SIGNAL)

        shell_this('; '.join(["xinput set-int-prop {} \"Device Enabled\" 8 {}".
                              format(tk_id, state_flag) for tk_id in tk_id_l]))

    def check_state(self, check_id_l):
        # check if all states have indeed changed:
        pat = re.compile(r"(?:Device Enabled.+?:).?([01])", re.IGNORECASE)
        for tk_id in check_id_l:
#......... part of the code omitted .........
Example 14: ApiBase
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
class ApiBase(iApi, iIpcTransportDataReceiveListener):
    r"""
    @summary: The base-class to the top-level api object ONLY, not sub-apis.
    """
    DEFAULT_MAX_ASYNC_HANDLERS = 1

    def __init__(self, name, ns="", solicited=True, ignoreUnhandled=False, maxAsync=None):
        super(ApiBase, self).__init__(ns=ns, solicited=solicited, name=name)
        self._setup(ns=self._getNamespace(), solicited=self.solicited, ipc=self.ipc)
        self._dataRxCount = itertools.count(0)
        self._ignoreUnhandled = ignoreUnhandled
        if (maxAsync == None) or (maxAsync < 1):
            maxAsync = ApiBase.DEFAULT_MAX_ASYNC_HANDLERS
        self._maxAsync = maxAsync
        self._q = Queue()
        self._workers = []
        self._createAsyncWorkers()
        self.isAlive = True

    def _createAsyncWorkers(self, start=True):
        # Create the thread pool to handle the api calls.
        for _ in range(0, self._maxAsync):
            thread = ApiAsyncWorker.create(self._q, self, start=start)
            self._workers.append(thread)
        self._logger.debug("Created workers.")

    def __del__(self):
        self.teardown()

    def teardown(self):
        if self is threading.current_thread(): return
        if not self.isAlive: return
        self.isAlive = False
        # Unfortunately we require time to stop the workers.
        self._logger.debug("Stopping async workers...")
        for _ in range(0, self._maxAsync):
            self._q.put(STOP())
        time.sleep(1)
        for worker in self._workers:
            worker.stop()
        for worker in self._workers:
            if worker.isAlive(): worker.join()
        self._workers = []
        self._q.close()
        time.sleep(1)
        del self._q
        self._q = None
        self._logger.debug("Stopped async workers (all daemon anyway).")
        # Now un-bind our data-receive listener from the IPC:
        if self._ipc != None:
            self._ipc.setTransportDataReceiveListener(self)
            self._ipc = None

    def _newIpc(self):
        super(ApiBase, self)._newIpc()
        # Now bind our data-receive listener to the IPC:
        self._ipc.setTransportDataReceiveListener(self)

    def transportDataReceive(self, tId, data):
        r"""
        @summary: Data is received that is NOT part of an existing transaction.
        We need to decide what to do with it...
        Recursively ask each of our sub-api's to decode the data and handle it.
        If no one can, then return UnsupportedApiError() (unless we consume it with:
        self._ignoreUnhandled==True).
        The handlers will have previously been set by the controlling entity, ie:
        ExecutionOrganiser, Head.
        This method always returns NoResponseRequired, making the call asynchronous.
        """
        myNsPrefix = self._getNamespacePrefix()
        try:
            count = self._dataRxCount.next()
            if isinstance(data, iApiTransportItem):
                ns = data.ns()
                if self._isInMyNamespace(ns):
                    self._findHandler(ns)
                    args = data.args()
                    kwargs = data.kwargs()
                    synchronous = True
                    self._q.put(KNOWN(ns, tId, synchronous, count, args, kwargs))
                    raise NoResponseRequired(ns)
            else:
                # Inform our listener about the data that we can't handle:
                handler = self.transportDataReceiveListener
                if handler != None:
                    self._q.put(UNKNOWN(tId, data))
                raise NoResponseRequired(myNsPrefix)
        except UnsupportedApiError, e:
            if self._ignoreUnhandled == False:
                # Propagate the exception directly, as before.
                raise
            # Consume silently:
            self._logger.debug("UnsupportedApiError: %(NS)s" % {"NS": e.ns()})
            raise NoResponseRequired(myNsPrefix, e)
Example 15: ComponentTestCase
# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import put [as alias]
class ComponentTestCase(TestCase):
    def setUp(self):
        self.notif_queue = Queue(1)
        self.error_queue = Queue()
        self.component = Component(self.notif_queue._reader,
                                   CommonErrorStrategy(),
                                   self.error_queue,
                                   PostgresConnector(_POSTGRES_DSN))
        self.component.log = MagicMock()

    def tearDown(self):
        # Component can have an attribute error on _parent_pid due to the fact
        # that we defer super.__init__() until start()
        if self.component.is_alive():
            self.component.terminate()
            self.component.join()

    @patch('hermes.components.select.select', side_effect=InterfaceError)
    def test_component_interface_error(self, select_module):
        # Start the component and let it settle
        self.component.start()
        sleep(1)
        self.assertFalse(self.component.is_alive())

    def test_component_select_error(self):
        """
        Due to process memory isolation we must mock the cleanup to put a
        pre-defined string into the error queue.
        """
        with patch('hermes.components.select.select',
                   side_effect=select.error):
            self.component.start()
            sleep(1)
            # Ensure the string put by the process is the same as what
            # was returned by the queue.
            self.assertFalse(self.component.is_alive())

    def test_not_implemented_exception_on_execute(self):
        exception_occurred = False
        try:
            self.component.execute(None)
        except NotImplementedError:
            exception_occurred = True
        self.assertTrue(exception_occurred)

    def test_exits_on_terminate(self):
        self.component.start()
        sleep(1)
        self.component.terminate()
        sleep(1)
        self.assertFalse(self.component.is_alive())

    def test_execute_called_on_notification(self):
        error_string = util.rand_string(10)

        def mock_func(*args, **kwargs):
            """
            The process will have isolated this function, as well as the
            error queue.
            """
            self.component.error_queue.put(error_string)

        with patch('hermes.components.Component.execute') as mock_execute:
            mock_execute.side_effect = mock_func
            self.component.start()
            sleep(2)
            self.assertTrue(self.component.is_alive())
            self.notif_queue.put(1)
            return_string = self.error_queue.get(timeout=2)
            self.assertEqual(error_string, return_string)

    def test_execute_done_called_on_notification(self):
        error_string = util.rand_string(10)

        def mock_func(*args, **kwargs):
            """
            The process will have isolated this function, as well as the
            error queue.
            """
            self.component.error_queue.put(error_string)

        self.component.post_execute = MagicMock()
        self.component.post_execute.side_effect = mock_func
        self.component.execute = MagicMock()

        self.component.start()
        self.notif_queue.put(1)

        return_string = self.error_queue.get()
        self.assertEqual(error_string, return_string)

    def test_error_received_on_exception_in_execute(self):
        mock_execption_return = (False, util.rand_string(10))
        error_strat = AbstractErrorStrategy()
#......... part of the code omitted .........