

Python Event.clear Method Code Examples

This article collects typical usage examples of the Python method tornado.locks.Event.clear. If you are wondering what Event.clear does, how to call it, or what real-world uses look like, the hand-picked examples below should help. You can also explore further usage examples of tornado.locks.Event, the class this method belongs to.


Fifteen code examples of the Event.clear method are shown below, sorted by popularity by default.
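
Before the examples, here is a minimal, self-contained sketch (written for this article, not taken from the projects below) of the basic Event lifecycle: wait() blocks until set() is called, and clear() resets the flag so the same Event can be reused.

from datetime import timedelta

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event

event = Event()

@gen.coroutine
def waiter():
    # Blocks until setter() calls event.set(), or raises a timeout
    # error after 5 seconds.
    yield event.wait(timeout=timedelta(seconds=5))
    event.clear()              # reset the flag so the Event can be reused
    assert not event.is_set()

@gen.coroutine
def setter():
    yield gen.sleep(0.1)
    event.set()                # wakes every coroutine blocked in wait()

IOLoop.current().run_sync(lambda: gen.multi([waiter(), setter()]))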

Example 1: test_exit_callback

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
def test_exit_callback():
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()
    evt = Event()

    @gen.coroutine
    def on_stop(_proc):
        assert _proc is proc
        yield gen.moment
        evt.set()

    # Normal process exit
    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    to_child.put(None)
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
    assert not proc.is_alive()

    # Process terminated
    proc = AsyncProcess(target=wait)
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    yield proc.terminate()
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
Developer: tomMoral, Project: distributed, Lines: 43, Source: test_asyncprocess.py
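
Note the reuse pattern above: a single Event serves both phases of the test. evt.clear() re-arms the flag before each AsyncProcess is started, so evt.wait() blocks again until the exit callback calls evt.set().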

Example 2: IndexManager

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class IndexManager(object):
  """ Keeps track of configured datastore indexes. """
  # The node which keeps track of admin lock contenders.
  ADMIN_LOCK_NODE = '/appscale/datastore/index_manager_lock'

  def __init__(self, zk_client, datastore_access, perform_admin=False):
    """ Creates a new IndexManager.

    Args:
      zk_client: A kazoo.client.KazooClient object.
      datastore_access: A DatastoreDistributed object.
      perform_admin: A boolean specifying whether or not to perform admin
        operations.
    """
    self.projects = {}
    self._wake_event = AsyncEvent()
    self._zk_client = zk_client
    self.admin_lock = AsyncKazooLock(self._zk_client, self.ADMIN_LOCK_NODE)

    # TODO: Refactor so that this dependency is not needed.
    self._ds_access = datastore_access

    self._zk_client.ensure_path('/appscale/projects')
    self._zk_client.ChildrenWatch('/appscale/projects', self._update_projects)

    # Since this manager can be used synchronously, ensure that the projects
    # are populated for this IOLoop iteration.
    project_ids = self._zk_client.get_children('/appscale/projects')
    self._update_projects_sync(project_ids)

    if perform_admin:
      IOLoop.current().spawn_callback(self._contend_for_admin_lock)

  def _update_projects_sync(self, new_project_ids):
    """ Updates the list of the deployment's projects.

    Args:
      new_project_ids: A list of strings specifying current project IDs.
    """
    for project_id in new_project_ids:
      if project_id not in self.projects:
        self.projects[project_id] = ProjectIndexManager(
          project_id, self._zk_client, self, self._ds_access)
        if self.admin_lock.is_acquired:
          IOLoop.current().spawn_callback(
            self.projects[project_id].apply_definitions)

    for project_id in list(self.projects.keys()):
      if project_id not in new_project_ids:
        self.projects[project_id].active = False
        del self.projects[project_id]

  def _update_projects(self, project_ids):
    """ Watches for changes to list of existing projects.

    Args:
      project_ids: A list of strings specifying current project IDs.
    """
    persistent_update_projects = retry_children_watch_coroutine(
      '/appscale/projects', self._update_projects_sync)
    IOLoop.instance().add_callback(persistent_update_projects, project_ids)

  def _handle_connection_change(self, state):
    """ Notifies the admin lock holder when the connection changes.

    Args:
      state: The new connection state.
    """
    IOLoop.current().add_callback(self._wake_event.set)

  @gen.coroutine
  def _contend_for_admin_lock(self):
    """
    Waits to acquire an admin lock that gives permission to apply index
    definitions. The lock is useful for preventing many servers from writing
    the same index entries at the same time. After acquiring the lock, the
    individual ProjectIndexManagers are responsible for mutating state whenever
    a project's index definitions change.
    """
    while True:
      # Set up a callback to get notified if the ZK connection changes.
      self._wake_event.clear()
      self._zk_client.add_listener(self._handle_connection_change)

      yield self.admin_lock.acquire()
      try:
        for project_index_manager in self.projects.values():
          IOLoop.current().spawn_callback(
            project_index_manager.apply_definitions)

        # Release the lock if the kazoo client gets disconnected.
        yield self._wake_event.wait()
      finally:
        self.admin_lock.release()
Developer: AppScale, Project: appscale, Lines: 96, Source: index_manager.py
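
The clear-then-subscribe-then-wait sequence in _contend_for_admin_lock is a reusable recipe for holding a resource until something changes. A minimal sketch of just that recipe follows; the lock object and the listener registration are hypothetical stand-ins, not AppScale APIs.

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event

wake_event = Event()

def on_connection_change(state):
    # A listener like this may run on a non-IOLoop thread (as with kazoo),
    # so schedule the set() on the IOLoop instead of calling it directly.
    IOLoop.current().add_callback(wake_event.set)

@gen.coroutine
def hold_lock_until_disturbed(lock):
    # `lock` is a hypothetical async lock exposing acquire()/release().
    while True:
        wake_event.clear()           # arm the event *before* subscribing,
                                     # so no notification can be missed
        # ... register on_connection_change with the client here ...
        yield lock.acquire()
        try:
            yield wake_event.wait()  # parked until the listener fires
        finally:
            lock.release()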

Example 3: Queue

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]

#......... part of the code omitted .........
        current time.
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Return an item if one is immediately available, else raise
        `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self.__put_internal(item)
            putter.set_result(None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each `.get` used to fetch a task, a
        subsequent call to `.task_done` tells the queue that the processing
        on the task is complete.

        If a `.join` is blocking, it resumes when all items have been
        processed; that is, when every `.put` is matched by a `.task_done`.

        Raises `ValueError` if called more times than `.put`.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    def join(self, timeout=None):
        """Block until all items in the queue are processed.

        Returns a Future, which raises `tornado.util.TimeoutError` after a
        timeout.
        """
        return self._finished.wait(timeout)

    @gen.coroutine
    def __aiter__(self):
        return _QueueIterator(self)

    # These three are overridable in subclasses.
    def _init(self):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)
    # End of the overridable methods.

    def __put_internal(self, item):
        self._unfinished_tasks += 1
        self._finished.clear()
        self._put(item)

    def _consume_expired(self):
        # Remove timed-out waiters.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self):
        return '<%s at %s %s>' % (
            type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, '_queue', None):
            result += ' queue=%r' % self._queue
        if self._getters:
            result += ' getters[%s]' % len(self._getters)
        if self._putters:
            result += ' putters[%s]' % len(self._putters)
        if self._unfinished_tasks:
            result += ' tasks=%s' % self._unfinished_tasks
        return result
Developer: FlorianLudwig, Project: tornado, Lines: 104, Source: queues.py
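
Event.clear is what keeps join() honest in this Queue: __put_internal clears _finished on every put, and task_done sets it again only once the unfinished count returns to zero. The short demo below exercises that behavior through tornado's public tornado.queues.Queue API (a sketch, not code from the project above).

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

@gen.coroutine
def main():
    q = Queue()
    q.put_nowait('job')        # internally clears the _finished event

    @gen.coroutine
    def worker():
        yield q.get()
        yield gen.sleep(0.1)   # simulate processing the item
        q.task_done()          # sets _finished once nothing is pending

    IOLoop.current().spawn_callback(worker)
    yield q.join()             # resolves only after task_done()
    print('all items processed')

IOLoop.current().run_sync(main)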

Example 4: __init__

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class SlaveHolder:
    def __init__(self, db, queue):
        self.db = db
        self.slaves = {}
        self._finished = Event()
        self._finished.set()
        self.queue = queue

    @coroutine
    def start(self):
        self._finished.clear()
        logging.debug('Starting slave-holder')

        cur = yield self.db.execute('SELECT * FROM registered_bots WHERE active = TRUE')
        columns = [i[0] for i in cur.description]

        while True:
            row = cur.fetchone()
            if not row:
                break

            row = dict(zip(columns, row))
            self._start_bot(**row)

        listen_future = self.queue.listen(slaveholder_queues(), self.queue_handler)

        try:
            yield self._finished.wait()
        finally:
            self.queue.stop(slaveholder_queues())
            yield listen_future

    def _start_bot(self, **kwargs):
        @coroutine
        def listen_done(f: Future):
            logging.debug('[bot#%s] Terminated', kwargs['id'])
            e = f.exception()
            if e:
                logging.debug('[bot#%s] Got exception: %s', kwargs['id'],
                              ''.join(format_exception(*f.exc_info())))
                if isinstance(e, ApiError) and e.code == 401:
                    logging.warning('[bot#%d] Disabling due to connection error', kwargs['id'])
                    yield self.queue.send(QUEUE_BOTERATOR_BOT_REVOKE, dumps(dict(error=str(e), **kwargs)))
                elif isinstance(e, ApiError) and e.code == 400 and 'chat not found' in e.description and \
                    str(kwargs['moderator_chat_id']) in e.request_body:
                    logging.warning('[bot#%d] Disabling due to unavailable moderator chat', kwargs['id'])
                    yield self.queue.send(QUEUE_BOTERATOR_BOT_REVOKE, dumps(dict(error=str(e), **kwargs)))
                elif isinstance(e, ApiError) and e.code == 409 and 'webhook is active' in e.description:
                    logging.warning('[bot#%d] Disabling due to misconfigured webhook', kwargs['id'])
                    yield self.queue.send(QUEUE_BOTERATOR_BOT_REVOKE, dumps(dict(error=str(e), **kwargs)))
                else:
                    IOLoop.current().add_timeout(timedelta(seconds=5), self._start_bot, **kwargs)

            del self.slaves[kwargs['id']]

        slave = Slave(db=self.db, **kwargs)
        slave_listen_f = slave.start()
        self.slaves[kwargs['id']] = {
            'future': slave_listen_f,
            'instance': slave,
        }
        IOLoop.current().add_future(slave_listen_f, listen_done)

    def stop(self):
        logging.info('Stopping slave-holder')
        for slave in self.slaves.values():
            slave['instance'].stop()

        self._finished.set()

    @coroutine
    def queue_handler(self, queue_name, body):
        body = loads(body.decode('utf-8'))

        if queue_name == QUEUE_SLAVEHOLDER_NEW_BOT:
            self._start_bot(**body)
        elif queue_name == QUEUE_SLAVEHOLDER_GET_BOT_INFO:
            bot = Api(body['token'], lambda x: None)

            if bot.bot_id in self.slaves:
                logging.debug('[bot#%s] Already registered', bot.bot_id)
                yield self.queue.send(body['reply_to'], dumps(dict(error='duplicate')))

            try:
                ret = yield bot.get_me()
                logging.debug('[bot#%s] Ok', bot.bot_id)
            except Exception as e:
                logging.debug('[bot#%s] Failed', bot.bot_id)
                yield self.queue.send(body['reply_to'], dumps(dict(error=str(e))))
                return

            yield self.queue.send(body['reply_to'], dumps(ret))
        elif queue_name == QUEUE_SLAVEHOLDER_GET_MODERATION_GROUP:
            update_with_command_f = Future()
            timeout_f = with_timeout(timedelta(seconds=body['timeout']), update_with_command_f)

            @coroutine
            def slave_update_handler(update):
                logging.debug('[bot#%s] Received update', bot.bot_id)
                if attach_cmd_filter.test(**update):
                    logging.debug('[bot#%s] /attach', bot.bot_id)
#......... part of the code omitted .........
Developer: andrey-yantsen, Project: boterator, Lines: 103, Source: slave_holder.py

Example 5: ConnectionPool

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class ConnectionPool(object):
    """ A maximum sized pool of Tornado IOStreams

    This provides a connect method that mirrors the normal distributed.connect
    method, but provides connection sharing and tracks connection limits.

    This object provides an ``rpc`` like interface::

        >>> rpc = ConnectionPool(limit=512)
        >>> scheduler = rpc('127.0.0.1:8786')
        >>> workers = [rpc(ip=ip, port=port) for ip, port in ...]

        >>> info = yield scheduler.identity()

    It creates enough streams to satisfy concurrent connections to any
    particular address::

        >>> a, b = yield [scheduler.who_has(), scheduler.has_what()]

    It reuses existing streams so that we don't have to continuously reconnect.

    It also maintains a stream limit to avoid "too many open file handle"
    issues.  Whenever this maximum is reached we clear out all idling streams.
    If that doesn't do the trick then we wait until one of the occupied streams
    closes.
    """
    def __init__(self, limit=512):
        self.open = 0
        self.active = 0
        self.limit = limit
        self.available = defaultdict(set)
        self.occupied = defaultdict(set)
        self.event = Event()

    def __str__(self):
        return "<ConnectionPool: open=%d, active=%d>" % (self.open,
                self.active)

    __repr__ = __str__

    def __call__(self, arg=None, ip=None, port=None, addr=None):
        """ Cached rpc objects """
        ip, port = ip_port_from_args(arg=arg, addr=addr, ip=ip, port=port)
        return RPCCall(ip, port, self)

    @gen.coroutine
    def connect(self, ip, port, timeout=3):
        if self.available.get((ip, port)):
            stream = self.available[ip, port].pop()
            self.active += 1
            self.occupied[ip, port].add(stream)
            raise gen.Return(stream)

        while self.open >= self.limit:
            self.event.clear()
            self.collect()
            yield self.event.wait()

        self.open += 1
        stream = yield connect(ip=ip, port=port, timeout=timeout)
        stream.set_close_callback(lambda: self.on_close(ip, port, stream))
        self.active += 1
        self.occupied[ip, port].add(stream)

        if self.open >= self.limit:
            self.event.clear()

        raise gen.Return(stream)

    def on_close(self, ip, port, stream):
        self.open -= 1

        if stream in self.available[ip, port]:
            self.available[ip, port].remove(stream)
        if stream in self.occupied[ip, port]:
            self.occupied[ip, port].remove(stream)
            self.active -= 1

        if self.open <= self.limit:
            self.event.set()

    def collect(self):
        logger.info("Collecting unused streams.  open: %d, active: %d",
                    self.open, self.active)
        for k, streams in list(self.available.items()):
            for stream in streams:
                stream.close()
Developer: amosonn, Project: distributed, Lines: 89, Source: core.py
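
The event here gates the connection limit: connect() clears it and waits whenever open >= limit, and on_close() sets it once a slot frees up. Below is a stripped-down, hypothetical LimitGate that isolates just this throttling mechanism.

from tornado import gen
from tornado.locks import Event

class LimitGate(object):
    """Hypothetical throttle mirroring ConnectionPool's use of Event."""

    def __init__(self, limit):
        self.limit = limit
        self.open = 0
        self.event = Event()
        self.event.set()

    @gen.coroutine
    def acquire(self):
        while self.open >= self.limit:
            self.event.clear()       # block newcomers while at the limit
            yield self.event.wait()  # resumes when release() sets it
        self.open += 1

    def release(self):
        self.open -= 1
        if self.open < self.limit:
            self.event.set()         # wake everyone parked in acquire()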

Example 6: MonitoringLoop

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class MonitoringLoop(object):
    def __init__(self, check_time, fx_correlator_object):

        self.instrument = fx_correlator_object
        self.hosts = self.instrument.fhosts + self.instrument.xhosts
        self.selected_host = None
        self.host_index = 0
        self.num_hosts = len(self.hosts)
        self.num_fhosts = len(self.instrument.fhosts)
        self.num_xhosts = len(self.instrument.xhosts)
        # self.num_bhosts = len(self.instrument.bhosts)

        # check config file if bhosts or xhosts

        if check_time == -1:
            self.check_time = float(self.instrument.configd['FxCorrelator']['monitor_loop_time'])
        else:
            self.check_time = check_time

        # set up periodic engine monitoring
        self.instrument_monitoring_loop_enabled = IOLoopEvent()
        self.instrument_monitoring_loop_enabled.clear()
        self.instrument_monitoring_loop_cb = None

        self.f_eng_board_monitoring_dict_prev = {}
        self.x_eng_board_monitoring_dict_prev = {}
        self.b_eng_board_monitoring_dict_prev = {}

        self.disabled_fhosts = []
        self.disabled_xhosts = []
        self.disabled_bhosts = []

        # some other useful bits of info
        self.n_chans = self.instrument.n_chans
        self.chans_per_xhost = self.n_chans / self.num_xhosts

    def start(self):
        """
        Start the monitoring loop
        :return: none
        """
        self._instrument_monitoring_loop_timer_start(check_time=self.check_time)

    def stop(self):
        """
        Stop the monitoring loop
        :return: none
        """
        self._instrument_monitoring_loop_timer_stop()

    def _instrument_monitoring_loop_timer_start(self, check_time=None):
        """
        Set up periodic check of various instrument elements
        :param check_time: the interval, in seconds, at which to check
        :return:
        """

        if not IOLoop.current()._running:
            raise RuntimeError('IOLoop not running, this will not work')

        self.instrument.logger.info('instrument_monitoring_loop for instrument %s '
                                    'set up with a period '
                                    'of %.2f seconds' % (self.instrument.descriptor, self.check_time))

        if self.instrument_monitoring_loop_cb is not None:
            self.instrument_monitoring_loop_cb.stop()
        self.instrument_monitoring_loop_cb = PeriodicCallback(
            self._instrument_monitoring_loop, check_time * 1000)

        self.instrument_monitoring_loop_enabled.set()
        self.instrument_monitoring_loop_cb.start()
        self.instrument.logger.info('Instrument Monitoring Loop Timer '
                                    'Started @ %s' % time.ctime())

    def _instrument_monitoring_loop_timer_stop(self):
        """
        Disable the periodic instrument monitoring loop
        :return:
        """

        if self.instrument_monitoring_loop_cb is not None:
            self.instrument_monitoring_loop_cb.stop()
        self.instrument_monitoring_loop_cb = None
        self.instrument_monitoring_loop_enabled.clear()
        self.instrument.logger.info('Instrument Monitoring Loop Timer '
                                    'Halted @ %s' % time.ctime())

    # TODO: use functools to pass this callback function with parameters
    def _instrument_monitoring_loop(self, check_fhosts=True, check_xhosts=True, check_bhosts=False):
        """
        Perform various checks periodically.
        :param corner_turner_check: enable periodic checking of the corner-
        turner; will disable F-engine output on overflow
        :param coarse_delay_check: enable periodic checking of the coarse
        delay
        :param vacc_check: enable periodic checking of the vacc
        turner
        :return:
        """
#.........这里部分代码省略.........
Developer: ska-sa, Project: corr2, Lines: 103, Source: corr_monitoring_loop.py

Example 7: ZMQDrain

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class ZMQDrain(object):
    """Implementation of IDrain that pushes to a zmq.Socket asynchronously.
    This implementation overrides the high-water mark behavior from
    cs.eyrie.vassal.Vassal to instead use a zmq.Poller.
    """

    def __init__(self, logger, loop, zmq_socket,
                 metric_prefix='emitter'):
        self.emitter = zmq_socket
        self.logger = logger
        self.loop = loop
        self.metric_prefix = metric_prefix
        self.output_error = Event()
        self.state = RUNNING
        self._writable = Event()
        self.sender_tag = 'sender:%s.%s' % (self.__class__.__module__,
                                            self.__class__.__name__)

    def _handle_events(self, fd, events):
        if events & self.loop.ERROR:
            self.logger.error('Error polling socket for writability')
        elif events & self.loop.WRITE:
            self.loop.remove_handler(self.emitter)
            self._writable.set()

    @gen.coroutine
    def _poll(self):
        self.loop.add_handler(self.emitter,
                              self._handle_events,
                              self.loop.WRITE)
        yield self._writable.wait()
        self._writable.clear()

    @gen.coroutine
    def close(self, timeout=None):
        self.state = CLOSING
        self.logger.debug("Flushing send queue")
        self.emitter.close()

    def emit_nowait(self, msg):
        self.logger.debug("Drain emitting")
        if isinstance(msg, basestring):
            msg = [msg]
        try:
            self.emitter.send_multipart(msg, zmq.NOBLOCK)
        except zmq.Again:
            raise QueueFull()

    @gen.coroutine
    def emit(self, msg, retry_timeout=INITIAL_TIMEOUT):
        if isinstance(msg, basestring):
            msg = [msg]
        while True:
            # This should ensure the ZMQ socket can accept more data
            yield self._poll()
            try:
                self.emitter.send_multipart(msg, zmq.NOBLOCK)
            except zmq.Again:
                # But sometimes it's not enough
                self.logger.debug('Error polling for socket writability')
                retry_timeout = min(retry_timeout*2, MAX_TIMEOUT)
                yield gen.sleep(retry_timeout.total_seconds())
            else:
                break
Developer: CrowdStrike, Project: cs.eyrie, Lines: 66, Source: drain.py
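
The wait-then-clear pair in _poll is worth noting: clearing immediately after the wait turns the Event into an edge-triggered signal, so each call to _poll consumes exactly one writability notification and emit() must poll again before every retry.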

Example 8: Queue

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]

#......... part of the code omitted .........
        elif self.full():
            raise QueueFull
        else:
            self._put(item)

    def get(self, timeout=None):
        """Remove and return an item from the queue.

        Returns a Future which resolves once an item is available, or raises
        `tornado.gen.TimeoutError` after a timeout.
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Return an item if one is immediately available, else raise
        `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self._put(item)
            putter.set_result(None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each `.get` used to fetch a task, a
        subsequent call to `.task_done` tells the queue that the processing
        on the task is complete.

        If a `.join` is blocking, it resumes when all items have been
        processed; that is, when every `.put` is matched by a `.task_done`.

        Raises `ValueError` if called more times than `.put`.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    def join(self, timeout=None):
        """Block until all items in the queue are processed. Returns a Future.

        Returns a Future, which raises `tornado.gen.TimeoutError` after a
        timeout.
        """
        return self._finished.wait(timeout)

    def _init(self):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._unfinished_tasks += 1
        self._finished.clear()
        self._queue.append(item)

    def _consume_expired(self):
        # Remove timed-out waiters.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self):
        return '<%s at %s %s>' % (
            type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, '_queue', None):
            result += ' queue=%r' % self._queue
        if self._getters:
            result += ' getters[%s]' % len(self._getters)
        if self._putters:
            result += ' putters[%s]' % len(self._putters)
        if self._unfinished_tasks:
            result += ' tasks=%s' % self._unfinished_tasks
        return result
Developer: zhiyajun11, Project: tornado, Lines: 104, Source: queues.py

Example 9: XBeeBase

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class XBeeBase(_XBeeBase):
    """
    Abstract base class providing command generation and response
    parsing methods for XBee modules.

    Constructor arguments:
        ser:    The file-like serial port to use.

        shorthand: boolean flag which determines whether shorthand command
                   calls (i.e. xbee.at(...) instead of xbee.send("at",...)
                   are allowed.

        callback: function which should be called with frame data
                  whenever a frame arrives from the serial port.

        escaped: boolean flag which determines whether the library should
                 operate in escaped mode. In this mode, certain data bytes
                 in the output and input streams will be escaped and unescaped
                 in accordance with the XBee API. This setting must match
                 the appropriate api_mode setting of an XBee device; see your
                 XBee device's documentation for more information.

        error_callback: function which should be called with an Exception
                 whenever an exception is raised while waiting for data from
                 the serial port. This will only take affect if the callback
                 argument is also used.
    """
    def __init__(self, *args, **kwargs):
        if 'io_loop' in kwargs:
            self._ioloop = kwargs.pop('io_loop')
        else:
            self._ioloop = ioloop.IOLoop.current()

        super(XBeeBase, self).__init__(*args, **kwargs)

        self._running = Event()
        self._running.set()

        self._frame_future = None
        self._frame_queue = deque()

        if self._callback:
            # Make Non-Blocking
            self.serial.timeout = 0
            self.process_frames()

        self._ioloop.add_handler(self.serial.fd,
                                 self._process_input,
                                 ioloop.IOLoop.READ)

    def halt(self):
        """
        halt: None -> None

        Stop the event, and remove the FD from the loop handler
        """
        if self._callback:
            self._running.clear()
            self._ioloop.remove_handler(self.serial.fd)

            if self._frame_future is not None:
                self._frame_future.set_result(None)
                self._frame_future = None

    @gen.coroutine
    def process_frames(self):
        """
        process_frames: None -> None

        Wait for a frame to become available, when resolved call the callback
        """
        while self._running.is_set():
            try:
                frame = yield self._get_frame()
                info = self._split_response(frame.data)
                if info is not None:
                    self._callback(info)
            except Exception as e:
                # Unexpected quit.
                if self._error_callback:
                    self._error_callback(e)

    @gen.coroutine
    def wait_read_frame(self, timeout=None):
        frame = yield self._get_frame(timeout=timeout)
        raise gen.Return(self._split_response(frame.data))

    def _get_frame(self, timeout=None):
        future = Future()
        if self._frame_queue:
            future.set_result(self._frame_queue.popleft())
        else:
            if timeout is not None:
                def on_timeout():
                    future.set_exception(_TimeoutException())

                handle = self._ioloop.add_timeout(
                    self._ioloop.time() + timeout, on_timeout
                )
                future.add_done_callback(lambda _:
#......... part of the code omitted .........
Developer: nioinnovation, Project: python-xbee, Lines: 103, Source: base.py
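
Here _running is a shutdown flag rather than a wake-up signal: process_frames loops while _running.is_set(), so halt() stops the coroutine cleanly by clearing the event and resolving any outstanding frame future.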

Example 10: AsyncKazooLock

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]

#......... part of the code omitted .........
  def _watch_session(self, state):
    self.wake_event.set()
    return True

  def _watch_session_listener(self, state):
    IOLoop.current().add_callback(self._watch_session, state)

  @gen.coroutine
  def _inner_acquire(self, timeout, ephemeral=True):

    # wait until it's our chance to get it..
    if self.is_acquired:
      raise ForceRetryError()

    # make sure our election parent node exists
    if not self.assured_path:
      yield self._ensure_path()

    node = None
    if self.create_tried:
      node = yield self._find_node()
    else:
      self.create_tried = True

    if not node:
      node = yield self.tornado_kazoo.create(
        self.create_path, self.data, ephemeral=ephemeral, sequence=True)
      # strip off path to node
      node = node[len(self.path) + 1:]

    self.node = node

    while True:
      self.wake_event.clear()

      # bail out with an exception if cancellation has been requested
      if self.cancelled:
        raise CancelledError()

      children = yield self._get_sorted_children()

      try:
        our_index = children.index(node)
      except ValueError:  # pragma: nocover
        # somehow we aren't in the children -- probably we are
        # recovering from a session failure and our ephemeral
        # node was removed
        raise ForceRetryError()

      predecessor = self.predecessor(children, our_index)
      if not predecessor:
        raise gen.Return(True)

      # otherwise we are in the mix. watch predecessor and bide our time
      predecessor = self.path + "/" + predecessor
      self.client.add_listener(self._watch_session_listener)
      try:
        yield self.tornado_kazoo.get(predecessor, self._watch_predecessor)
      except NoNodeError:
        pass  # predecessor has already been deleted
      else:
        try:
          yield self.wake_event.wait(timeout)
        except gen.TimeoutError:
          raise LockTimeout("Failed to acquire lock on %s after "
                            "%s seconds" % (self.path, timeout))
Developer: AppScale, Project: appscale, Lines: 70, Source: tornado_kazoo.py

Example 11: __init__

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]

#......... part of the code omitted .........
                response = ujson.loads(e.response.body.decode('utf-8'))
                raise ApiError(response['error_code'], response['description'], response.get('parameters'),
                               request_body=body)
            else:
                raise ApiError(e.code, None, request_body=body)

        return None

    @coroutine
    def get_updates(self, offset: int=None, limit: int=100, timeout: int=2, retry_on_nonuser_error: bool=False):
        assert 1 <= limit <= 100
        assert 0 <= timeout

        request = {
            'limit': limit,
            'timeout': timeout
        }

        if offset is not None:
            request['offset'] = offset

        data = yield self.__request_api('getUpdates', request, request_timeout=timeout * 1.5,
                                        retry_on_nonuser_error=retry_on_nonuser_error)

        if data is None:
            return []

        return data

    @coroutine
    def wait_commands(self, last_update_id=None):
        assert self._finished.is_set()

        self._finished.clear()

        self.consumption_state = self.STATE_WORKING

        if last_update_id is not None:
            last_update_id += 1

        yield self.get_me()

        while not self._finished.is_set():
            try:
                updates = yield self.get_updates(last_update_id, retry_on_nonuser_error=True)
            except:
                self._finished.set()
                raise

            for update in updates:
                yield maybe_future(self.processor(update))
                if 'update_id' in update:
                    last_update_id = update['update_id']

            if len(updates):
                last_update_id += 1

    @coroutine
    def send_chat_action(self, chat_id, action: str):
        return (yield self.__request_api('sendChatAction', {'chat_id': chat_id, 'action': action}))

    @coroutine
    def send_message(self, text: str, chat_id=None, reply_to_message: dict=None, parse_mode: str=None,
                     disable_web_page_preview: bool=False, disable_notification: bool=False,
                     reply_to_message_id: int=None, reply_markup=None):
        request = {
Developer: andrey-yantsen, Project: tobot, Lines: 70, Source: telegram.py

Example 12: Queue

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]

#......... part of the code omitted .........
        """从队列中删除并返回一个项目.

        返回一个Future对象, 当项目可用时resolve, 或者在超时后抛出
        `tornado.gen.TimeoutError` .
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self):
        """非阻塞的从队列中删除并返回一个项目.

        如果有项目是立即可用的则返回该项目, 否则抛出 `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self.__put_internal(item)
            putter.set_result(None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self):
        """表明前面排队的任务已经完成.

        被消费者队列使用. 每个 `.get` 用来获取一个任务, 随后(subsequent)
        调用 `.task_done` 告诉队列正在处理的任务已经完成.

        如果 `.join` 正在阻塞, 它会在所有项目都被处理完后调起;
        即当每个 `.put` 都被一个 `.task_done` 匹配.

        如果调用次数超过 `.put` 将会抛出 `ValueError` .
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()

    def join(self, timeout=None):
        """阻塞(block)直到队列中的所有项目都处理完.

        返回一个Future对象, 超时后会抛出 `tornado.gen.TimeoutError` 异常.
        """
        return self._finished.wait(timeout)

    @gen.coroutine
    def __aiter__(self):
        return _QueueIterator(self)

    # These three are overridable in subclasses.
    def _init(self):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)
    # End of the overridable methods.

    def __put_internal(self, item):
        self._unfinished_tasks += 1
        self._finished.clear()
        self._put(item)

    def _consume_expired(self):
        # Remove timed-out waiters.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self):
        return '<%s at %s %s>' % (
            type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, '_queue', None):
            result += ' queue=%r' % self._queue
        if self._getters:
            result += ' getters[%s]' % len(self._getters)
        if self._putters:
            result += ' putters[%s]' % len(self._putters)
        if self._unfinished_tasks:
            result += ' tasks=%s' % self._unfinished_tasks
        return result
Developer: cufrancis, Project: tornado-zh, Lines: 104, Source: queues.py

Example 13: Application

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class Application(object):
    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message handler

        :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}

        self.local_node = node
        self.handlers = {}

        self.tcpclient = TCPClient()

        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()

        self.sequence_number = 0

        if routes:
            self.add_handlers(routes)

        self.pipe = pipe
        self.ioloop = IOLoop.current()

        self.add_node_event = Event()

    def next_sequence_number(self):
        self.sequence_number += 1
        return self.sequence_number

    @coroutine
    def ping_random_node(self):
        node = yield self.get_random_node()
        LOGGER.debug('{} pinging random node: {}'.format(self.local_node.node_id,
                                                         node.node_id))
        try:
            yield self.ping(node)
        except TimeoutError:
            self.mark_suspect(node)

    @coroutine
    def add_node(self, node):
        if node.node_id not in self.nodes:
            LOGGER.debug('Adding node {} to {}'.format(node, self.nodes))
            self.add_node_event.set()
            self.nodes[node.node_id] = node
            LOGGER.debug('Added node {} to {}'.format(node, self.nodes))

    @coroutine
    def remove_node(self, node):
        if node.node_id in self.nodes:
            del self.nodes[node.node_id]

            other_nodes = yield self.get_other_nodes
            if not other_nodes:
                self.add_node_event.clear()

    def add_handlers(self, handlers):
        for message_type, handler_cls in handlers:
            assert message_type in MESSAGE_TYPES, (
                'Message type {!r} not found in MESSAGE TYPES {}'.format(
                    message_type,
                    MESSAGE_TYPES.keys()
                )
            )
            self.handlers[message_type] = handler_cls(self)

    def route_stream_message(self, stream, message_type, message):
        LOGGER.debug('{!r} received {} message from {!r}'.format(self, message_type, stream))
        message_cls = MESSAGE_TYPES[message_type]
        message_obj = message_cls(**message)

        handler = self.handlers[message_type]
        LOGGER.debug('Routing {} to {}'.format(message_type, handler))
        handler(stream, message_obj)

    @coroutine
    def send_message(self, stream, message):
        LOGGER.debug('Sending message {!r} to {}'.format(message.MESSAGE_TYPE, stream))
        try:
            yield stream.write(message.to_msgpack)
        except StreamClosedError:
            LOGGER.warn('Unable to send {} to {} - stream closed'.format(message.MESSAGE_TYPE, stream))

    @coroutine
    def _get_next_message(self, stream):
        # get the next message from the stream
        unpacker = msgpack.Unpacker()
        try:
            wire_bytes = yield with_timeout(
                datetime.timedelta(seconds=PING_TIMEOUT),
                stream.read_bytes(4096, partial=True)
            )
        except StreamClosedError:
#......... part of the code omitted .........
Developer: jefffm, Project: swimpy, Lines: 103, Source: app.py

Example 14: ProjectIndexManager

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class ProjectIndexManager(object):
  """ Keeps track of composite index definitions for a project. """

  def __init__(self, project_id, zk_client, index_manager, datastore_access):
    """ Creates a new ProjectIndexManager.

    Args:
      project_id: A string specifying a project ID.
      zk_client: A KazooClient.
      update_callback: A function that should be called with the project ID
        and index list every time the indexes get updated.
      index_manager: An IndexManager used for checking lock status.
      datastore_access: A DatastoreDistributed object.
    """
    self.project_id = project_id
    self.indexes_node = '/appscale/projects/{}/indexes'.format(self.project_id)
    self.active = True
    self.update_event = AsyncEvent()

    self._creation_times = {}
    self._index_manager = index_manager
    self._zk_client = zk_client
    self._ds_access = datastore_access

    self._zk_client.DataWatch(self.indexes_node, self._update_indexes_watch)

    # Since this manager can be used synchronously, ensure that the indexes
    # are populated for this IOLoop iteration.
    try:
      encoded_indexes = self._zk_client.get(self.indexes_node)[0]
    except NoNodeError:
      encoded_indexes = '[]'

    self.indexes = [DatastoreIndex.from_dict(self.project_id, index)
                    for index in json.loads(encoded_indexes)]

  @property
  def indexes_pb(self):
    if self._zk_client.state != KazooState.CONNECTED:
      raise IndexInaccessible('ZooKeeper connection is not active')

    return [index.to_pb() for index in self.indexes]

  @gen.coroutine
  def apply_definitions(self):
    """ Populate composite indexes that are not marked as ready yet. """
    try:
      yield self.update_event.wait()
      self.update_event.clear()
      if not self._index_manager.admin_lock.is_acquired or not self.active:
        return

      logger.info(
        'Applying composite index definitions for {}'.format(self.project_id))

      for index in self.indexes:
        if index.ready:
          continue

        # Wait until all clients have either timed out or received the new index
        # definition. This prevents entities from being added without entries
        # while the index is being rebuilt.
        creation_time = self._creation_times.get(index.id, time.time())
        consensus = creation_time + (self._zk_client._session_timeout / 1000.0)
        yield gen.sleep(max(consensus - time.time(), 0))

        yield self._ds_access.update_composite_index(
          self.project_id, index.to_pb())
        logger.info('Index {} is now ready'.format(index.id))
        self._mark_index_ready(index.id)

      logging.info(
        'All composite indexes for {} are ready'.format(self.project_id))
    finally:
      IOLoop.current().spawn_callback(self.apply_definitions)

  def delete_index_definition(self, index_id):
    """ Remove a definition from a project's list of configured indexes.

    Args:
      index_id: An integer specifying an index ID.
    """
    try:
      encoded_indexes, znode_stat = self._zk_client.get(self.indexes_node)
    except NoNodeError:
      # If there are no index definitions, there is nothing to do.
      return

    node_version = znode_stat.version
    indexes = [DatastoreIndex.from_dict(self.project_id, index)
               for index in json.loads(encoded_indexes)]

    encoded_indexes = json.dumps([index.to_dict() for index in indexes
                                  if index.id != index_id])
    self._zk_client.set(self.indexes_node, encoded_indexes,
                        version=node_version)

  def _mark_index_ready(self, index_id):
    """ Updates the index metadata to reflect the new state of the index.

#......... part of the code omitted .........
Developer: AppScale, Project: appscale, Lines: 103, Source: index_manager.py

Example 15: TornadoReconnectionManager

# Required import: from tornado.locks import Event [as alias]
# Or: from tornado.locks.Event import clear [as alias]
class TornadoReconnectionManager(ReconnectionManager):
    def __init__(self, pubnub):
        self._cancelled_event = Event()
        super(TornadoReconnectionManager, self).__init__(pubnub)

    @gen.coroutine
    def _register_heartbeat_timer(self):
        self._cancelled_event.clear()

        while not self._cancelled_event.is_set():
            if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                self._timer_interval = int(math.pow(2, self._connection_errors) - 1)
                if self._timer_interval > self.MAXEXPONENTIALBACKOFF:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                    self._connection_errors = 1
                    logger.debug("timerInterval > MAXEXPONENTIALBACKOFF at: %s" % utils.datetime_now())
                elif self._timer_interval < 1:
                    self._timer_interval = self.MINEXPONENTIALBACKOFF
                logger.debug("timerInterval = %d at: %s" % (self._timer_interval, utils.datetime_now()))
            else:
                self._timer_interval = self.INTERVAL

            # >>> Wait given interval or cancel
            sleeper = tornado.gen.sleep(self._timer_interval)
            canceller = self._cancelled_event.wait()

            wi = tornado.gen.WaitIterator(canceller, sleeper)

            while not wi.done():
                try:
                    future = wi.next()
                    yield future
                except Exception as e:
                    # TODO: verify the error will not be eaten
                    logger.error(e)
                    raise
                else:
                    if wi.current_future == sleeper:
                        break
                    elif wi.current_future == canceller:
                        return
                    else:
                        raise Exception("unknown future raised")

            logger.debug("reconnect loop at: %s" % utils.datetime_now())

            # >>> Attempt to request /time/0 endpoint
            try:
                yield self._pubnub.time().result()
                self._connection_errors = 1
                self._callback.on_reconnect()
                logger.debug("reconnection manager stop due success time endpoint call: %s" % utils.datetime_now())
                break
            except Exception:
                if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.EXPONENTIAL:
                    logger.debug("reconnect interval increment at: %s" % utils.datetime_now())
                    self._connection_errors += 1

    def start_polling(self):
        if self._pubnub.config.reconnect_policy == PNReconnectionPolicy.NONE:
            logger.warn("reconnection policy is disabled, please handle reconnection manually.")
            return

        self._pubnub.ioloop.spawn_callback(self._register_heartbeat_timer)

    def stop_polling(self):
        if self._cancelled_event is not None and not self._cancelled_event.is_set():
            self._cancelled_event.set()
Developer: pubnub, Project: python, Lines: 70, Source: pubnub_tornado.py
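
The WaitIterator construction above, racing an event.wait() against gen.sleep(), is a general recipe for a cancellable delay. The sketch below (written for this article, not part of the PubNub SDK) packages it as a helper.

from tornado import gen
from tornado.locks import Event

cancelled = Event()

@gen.coroutine
def cancellable_sleep(seconds):
    """Sleep for `seconds`, returning early if `cancelled` is set.

    Returns True if the full interval elapsed, False if cancelled.
    """
    sleeper = gen.sleep(seconds)
    canceller = cancelled.wait()
    wi = gen.WaitIterator(canceller, sleeper)
    while not wi.done():
        yield wi.next()
        if wi.current_future is sleeper:
            raise gen.Return(True)   # the timer fired first
        if wi.current_future is canceller:
            raise gen.Return(False)  # cancellation won the race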


Note: The tornado.locks.Event.clear method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.