

Python Queue.put_nowait Method Code Examples

This article collects typical usage examples of the Python method tornado.queues.Queue.put_nowait. If you are wondering what Queue.put_nowait does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the class it belongs to, tornado.queues.Queue.


The following presents 15 code examples of the Queue.put_nowait method, sorted by popularity by default.
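Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what put_nowait does on a tornado queue: it enqueues an item immediately without yielding, and raises QueueFull when the queue is bounded and has no free slot.

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue, QueueFull


@gen.coroutine
def main():
    q = Queue(maxsize=2)
    q.put_nowait('a')        # returns immediately, no yield required
    q.put_nowait('b')
    try:
        q.put_nowait('c')    # the queue is full, so this raises QueueFull
    except QueueFull:
        print('queue is full, item dropped')

    item = yield q.get()     # consuming an item frees a slot again
    print(item)


IOLoop.current().run_sync(main)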

Example 1: test_multi_queues

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
def test_multi_queues(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)

    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    # Test update graph
    sched.put_nowait({'op': 'update-graph',
                      'dsk': {'x': (inc, 1),
                              'y': (inc, 'x'),
                              'z': (inc, 'y')},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    slen, rlen = len(s.scheduler_queues), len(s.report_queues)
    sched2, report2 = Queue(), Queue()
    s.handle_queues(sched2, report2)
    assert slen + 1 == len(s.scheduler_queues)
    assert rlen + 1 == len(s.report_queues)

    sched2.put_nowait({'op': 'update-graph',
                       'dsk': {'a': (inc, 10)},
                       'keys': ['a']})

    for q in [report, report2]:
        while True:
            msg = yield q.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'a':
                break
Author: lucashtnguyen, Project: distributed, Lines: 36, Source: test_scheduler.py

Example 2: test_diagnostic

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
def test_diagnostic(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)
    msg = yield report.get()
    assert msg['op'] == 'stream-start'

    class Counter(SchedulerPlugin):
        def start(self, scheduler):
            scheduler.add_plugin(self)
            self.count = 0

        def task_finished(self, scheduler, key, worker, nbytes):
            self.count += 1

    counter = Counter()
    counter.start(s)

    assert counter.count == 0
    sched.put_nowait({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x')),
                                'z': dumps_task((inc, 'y'))},
                      'dependencies': {'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert counter.count == 3
Author: dela3499, Project: distributed, Lines: 31, Source: test_plugin.py

Example 3: f

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x': (div, 1, 0)},
                       keys=['x'])
        progress = TextProgressBar(['x'], scheduler=s)
        progress.start()

        while True:
            msg = yield report.get()
            if msg.get('key') == 'x':
                break

        assert progress.status == 'error'
        assert not progress._timer.is_alive()

        progress = TextProgressBar(['x'], scheduler=s)
        progress.start()
        assert progress.status == 'error'
        assert not progress._timer or not progress._timer.is_alive()

        sched.put_nowait({'op': 'close'})
        yield done
Author: aterrel, Project: distributed, Lines: 29, Source: test_diagnostics.py

Example 4: Stream

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Stream(object):
    def __init__(self, raw_headers, header_table):
        self._queue = Queue()
        self._header_table = header_table
        self._current_headers = self._header_table.merge(raw_headers)

    @gen.coroutine
    def get(self, timeout=0):
        if timeout == 0:
            res, headers = yield self._queue.get()
        else:
            deadline = datetime.timedelta(seconds=timeout)
            res, headers = yield self._queue.get(deadline)

        self._current_headers = headers
        if isinstance(res, Exception):
            raise res
        else:
            raise gen.Return(res)

    def push(self, item, raw_headers):
        headers = self._header_table.merge(raw_headers)
        self._queue.put_nowait((item, headers))

    def done(self, raw_headers):
        headers = self._header_table.merge(raw_headers)
        return self._queue.put_nowait((ChokeEvent(), headers))

    def error(self, errnumber, reason, raw_headers):
        headers = self._header_table.merge(raw_headers)
        return self._queue.put_nowait((RequestError(errnumber, reason), headers))

    @property
    def headers(self):
        return self._current_headers
Author: antmat, Project: cocaine-framework-python, Lines: 37, Source: request.py
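Stream.get above (like Rx.get in the later examples) passes a datetime.timedelta to Queue.get as a deadline. A small standalone sketch of that shape, with put_nowait on the producer side and a timeout on the consumer side, might look like the following; the message tuple is made up for illustration.

import datetime

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue


@gen.coroutine
def main():
    q = Queue()
    q.put_nowait(('write', b'payload'))   # the producer side never blocks

    name, payload = yield q.get(timeout=datetime.timedelta(seconds=1))
    print(name, payload)

    try:
        yield q.get(timeout=datetime.timedelta(milliseconds=50))
    except gen.TimeoutError:
        print('no message arrived before the deadline')


IOLoop.current().run_sync(main)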

Example 5: f

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue()
        s.handle_queues(sched, report)
        msg = yield report.get()
        assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x-1': (inc, 1),
                            'x-2': (inc, 'x-1'),
                            'x-3': (inc, 'x-2'),
                            'y-1': (dec, 'x-3'),
                            'y-2': (dec, 'y-1'),
                            'e': (throws, 'y-2'),
                            'other': (inc, 123)},
                       keys=['e'])

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'y-2':
                break

        p = MultiProgressWidget(['x-1', 'x-2', 'x-3'], scheduler=s)
        assert set(concat(p.all_keys.values())).issuperset({'x-1', 'x-2', 'x-3'})
        assert 'x' in p.bars

        sched.put_nowait({'op': 'close'})
        yield done
Author: aterrel, Project: distributed, Lines: 29, Source: test_widgets.py

Example 6: test_robust_to_bad_plugin

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
def test_robust_to_bad_plugin(s, a, b):
    sched, report = Queue(), Queue()
    s.handle_queues(sched, report)

    class Bad(SchedulerPlugin):
        def task_finished(self, scheduler, key, worker, nbytes):
            raise Exception()

    bad = Bad()
    s.add_plugin(bad)

    sched.put_nowait({'op': 'update-graph',
                      'dsk': {'x': (inc, 1),
                              'y': (inc, 'x'),
                              'z': (inc, 'y')},
                      'keys': ['z']})

    while True:  # normal execution
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break
Author: lucashtnguyen, Project: distributed, Lines: 22, Source: test_progress.py

Example 7: Rx

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Rx(PrettyPrintable):
    def __init__(self, rx_tree, io_loop=None, servicename=None):
        # If it's not the main thread
        # and a current IOloop doesn't exist here,
        # IOLoop.instance becomes self._io_loop
        self._io_loop = io_loop or IOLoop.current()
        self._queue = Queue()
        self._done = False
        self.servicename = servicename
        self.rx_tree = rx_tree
        self.default_protocol = detect_protocol_type(rx_tree)

    @coroutine
    def get(self, timeout=0, protocol=None):
        if self._done and self._queue.empty():
            raise ChokeEvent()

        # to pull various service errors
        if timeout <= 0 or timeout is None:
            item = yield self._queue.get()
        else:
            deadline = datetime.timedelta(seconds=timeout)
            item = yield self._queue.get(deadline)

        if isinstance(item, Exception):
            raise item

        if protocol is None:
            protocol = self.default_protocol

        name, payload = item
        res = protocol(name, payload)
        if isinstance(res, ProtocolError):
            raise ServiceError(self.servicename, res.reason,
                               res.code, res.category)
        else:
            raise Return(res)

    def done(self):
        self._done = True

    def push(self, msg_type, payload):
        dispatch = self.rx_tree.get(msg_type)
        log.debug("dispatch %s %.300s", dispatch, payload)
        if dispatch is None:
            raise InvalidMessageType(self.servicename, CocaineErrno.INVALIDMESSAGETYPE,
                                     "unexpected message type %s" % msg_type)
        name, rx = dispatch
        log.debug("name `%s` rx `%s`", name, rx)
        self._queue.put_nowait((name, payload))
        if rx == {}:  # the last transition
            self.done()
        elif rx is not None:  # not a recursive transition
            self.rx_tree = rx

    def error(self, err):
        self._queue.put_nowait(err)

    def closed(self):
        return self._done

    def _format(self):
        return "name: %s, queue: %s, done: %s" % (
            self.servicename, self._queue, self._done)
Author: Alukardd, Project: cocaine-framework-python, Lines: 66, Source: channel.py

Example 8: Scheduler

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Scheduler(object):
    def __init__(self, center, delete_batch_time=1):
        self.scheduler_queue = Queue()
        self.report_queue = Queue()
        self.delete_queue = Queue()
        self.status = None

        self.center = coerce_to_rpc(center)

        self.dask = dict()
        self.dependencies = dict()
        self.dependents = dict()
        self.generation = 0
        self.has_what = defaultdict(set)
        self.held_data = set()
        self.in_play = set()
        self.keyorder = dict()
        self.nbytes = dict()
        self.ncores = dict()
        self.processing = dict()
        self.restrictions = dict()
        self.stacks = dict()
        self.waiting = dict()
        self.waiting_data = dict()
        self.who_has = defaultdict(set)

        self.exceptions = dict()
        self.tracebacks = dict()
        self.exceptions_blame = dict()

        self.delete_batch_time = delete_batch_time

    @gen.coroutine
    def _sync_center(self):
        self.ncores, self.has_what, self.who_has = yield [
                self.center.ncores(),
                self.center.has_what(),
                self.center.who_has()]

    def start(self):
        collections = [self.dask, self.dependencies, self.dependents,
                self.waiting, self.waiting_data, self.in_play, self.keyorder,
                self.nbytes, self.processing, self.restrictions]
        for collection in collections:
            collection.clear()

        self.processing = {addr: set() for addr in self.ncores}
        self.stacks = {addr: list() for addr in self.ncores}

        self.worker_queues = {addr: Queue() for addr in self.ncores}

        self.coroutines = ([
             self.scheduler(),
             delete(self.scheduler_queue, self.delete_queue,
                    self.center.ip, self.center.port,
                    self.delete_batch_time)]
            + [worker(self.scheduler_queue, self.worker_queues[w], w, n)
               for w, n in self.ncores.items()])

        for cor in self.coroutines:
            if cor.done():
                raise cor.exception()

        return All(self.coroutines)

    @gen.coroutine
    def _close(self):
        self.scheduler_queue.put_nowait({'op': 'close'})
        yield All(self.coroutines)

    @gen.coroutine
    def cleanup(self):
        """ Clean up queues and coroutines, prepare to stop """
        logger.debug("Cleaning up coroutines")
        n = 0
        self.delete_queue.put_nowait({'op': 'close'})
        n += 1
        for w, nc in self.ncores.items():
            for i in range(nc):
                self.worker_queues[w].put_nowait({'op': 'close'})
                n += 1

        for i in range(n):
            yield self.scheduler_queue.get()

    def mark_ready_to_run(self, key):
        """ Send task to an appropriate worker, trigger worker """
        logger.debug("Mark %s ready to run", key)
        if key in self.waiting:
            assert not self.waiting[key]
            del self.waiting[key]

        new_worker = decide_worker(self.dependencies, self.stacks,
                self.who_has, self.restrictions, self.nbytes, key)

        self.stacks[new_worker].append(key)
        self.ensure_occupied(new_worker)

    def mark_key_in_memory(self, key, workers=None):
        logger.debug("Mark %s in memory", key)
        if workers is None:
            workers = self.who_has[key]
#......... part of the code omitted here .........
Author: freeman-lab, Project: distributed, Lines: 103, Source: scheduler.py

Example 9: Rx

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Rx(PrettyPrintable):
    def __init__(self, rx_tree, session_id, header_table=None, io_loop=None, service_name=None,
                 raw_headers=None, trace_id=None):
        if header_table is None:
            header_table = CocaineHeaders()

        # If it's not the main thread
        # and a current IOloop doesn't exist here,
        # IOLoop.instance becomes self._io_loop
        self._io_loop = io_loop or IOLoop.current()
        self._queue = Queue()
        self._done = False
        self.session_id = session_id
        self.service_name = service_name
        self.rx_tree = rx_tree
        self.default_protocol = detect_protocol_type(rx_tree)
        self._headers = header_table
        self._current_headers = self._headers.merge(raw_headers)
        self.log = get_trace_adapter(log, trace_id)

    @coroutine
    def get(self, timeout=0, protocol=None):
        if self._done and self._queue.empty():
            raise ChokeEvent()

        # to pull various service errors
        if timeout <= 0:
            item = yield self._queue.get()
        else:
            deadline = datetime.timedelta(seconds=timeout)
            item = yield self._queue.get(deadline)

        if isinstance(item, Exception):
            raise item

        if protocol is None:
            protocol = self.default_protocol

        name, payload, raw_headers = item
        self._current_headers = self._headers.merge(raw_headers)
        res = protocol(name, payload)
        if isinstance(res, ProtocolError):
            raise ServiceError(self.service_name, res.reason, res.code, res.category)
        else:
            raise Return(res)

    def done(self):
        self._done = True

    def push(self, msg_type, payload, raw_headers):
        dispatch = self.rx_tree.get(msg_type)
        self.log.debug("dispatch %s %.300s", dispatch, payload)
        if dispatch is None:
            raise InvalidMessageType(self.service_name, CocaineErrno.INVALIDMESSAGETYPE,
                                     "unexpected message type %s" % msg_type)
        name, rx = dispatch
        self.log.info(
            "got message from `%s`: channel id: %s, type: %s",
            self.service_name,
            self.session_id,
            name
        )
        self._queue.put_nowait((name, payload, raw_headers))
        if rx == {}:  # the last transition
            self.done()
        elif rx is not None:  # not a recursive transition
            self.rx_tree = rx

    def error(self, err):
        self._queue.put_nowait(err)

    def closed(self):
        return self._done

    def _format(self):
        return "name: %s, queue: %s, done: %s" % (self.service_name, self._queue, self._done)

    @property
    def headers(self):
        return self._current_headers
Author: antmat, Project: cocaine-framework-python, Lines: 82, Source: channel.py

Example 10: SubscribeListener

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()
        self.error_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()
        elif status.is_error():
            self.error_queue.put_nowait(status.error_data.exception)

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    @tornado.gen.coroutine
    def _wait_for(self, coro):
        error = self.error_queue.get()
        wi = tornado.gen.WaitIterator(coro, error)

        while not wi.done():
            result = yield wi.next()

            if wi.current_future == coro:
                raise gen.Return(result)
            elif wi.current_future == error:
                raise result
            else:
                raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

    @tornado.gen.coroutine
    def wait_for_connect(self):
        if not self.connected_event.is_set():
            yield self._wait_for(self.connected_event.wait())
        else:
            raise Exception("instance is already connected")

    @tornado.gen.coroutine
    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            yield self._wait_for(self.disconnected_event.wait())
        else:
            raise Exception("instance is already disconnected")

    @tornado.gen.coroutine
    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try: # NOQA
                env = yield self._wait_for(self.message_queue.get())
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.message_queue.task_done()

    @tornado.gen.coroutine
    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            try:
                try:
                    env = yield self._wait_for(self.presence_queue.get())
                except: # NOQA E722 pylint: disable=W0702
                    break
                if env.channel in channel_names:
                    raise tornado.gen.Return(env)
                else:
                    continue
            finally:
                self.presence_queue.task_done()
Author: pubnub, Project: python, Lines: 82, Source: pubnub_tornado.py

Example 11: Executor

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Executor(object):
    """ Distributed executor with data dependencies

    This executor resembles executors in concurrent.futures but also allows
    Futures within submit/map calls.

    Provide center address on initialization

    >>> executor = Executor(('127.0.0.1', 8787))  # doctest: +SKIP

    Use ``submit`` method like normal

    >>> a = executor.submit(add, 1, 2)  # doctest: +SKIP
    >>> b = executor.submit(add, 10, 20)  # doctest: +SKIP

    Additionally, provide results of submit calls (futures) to further submit
    calls:

    >>> c = executor.submit(add, a, b)  # doctest: +SKIP

    This allows for the dynamic creation of complex dependencies.
    """
    def __init__(self, center, start=True, delete_batch_time=1):
        self.center = coerce_to_rpc(center)
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.dask = dict()
        self.restrictions = dict()
        self.loop = IOLoop()
        self.report_queue = Queue()
        self.scheduler_queue = Queue()
        self._shutdown_event = Event()
        self._delete_batch_time = delete_batch_time

        if start:
            self.start()

    def start(self):
        """ Start scheduler running in separate thread """
        from threading import Thread
        self.loop.add_callback(self._go)
        self._loop_thread = Thread(target=self.loop.start)
        self._loop_thread.start()

    def __enter__(self):
        if not self.loop._running:
            self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.shutdown()

    def _inc_ref(self, key):
        self.refcount[key] += 1

    def _dec_ref(self, key):
        self.refcount[key] -= 1
        if self.refcount[key] == 0:
            del self.refcount[key]
            self._release_key(key)

    def _release_key(self, key):
        """ Release key from distributed memory """
        self.futures[key]['event'].clear()
        logger.debug("Release key %s", key)
        del self.futures[key]
        self.scheduler_queue.put_nowait({'op': 'release-held-data',
                                         'key': key})

    @gen.coroutine
    def report(self):
        """ Listen to scheduler """
        while True:
            msg = yield self.report_queue.get()
            if msg['op'] == 'close':
                break
            if msg['op'] == 'task-finished':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'finished'
                    self.futures[msg['key']]['event'].set()
            if msg['op'] == 'lost-data':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'lost'
                    self.futures[msg['key']]['event'].clear()
            if msg['op'] == 'task-erred':
                if msg['key'] in self.futures:
                    self.futures[msg['key']]['status'] = 'error'
                    self.futures[msg['key']]['event'].set()

    @gen.coroutine
    def _shutdown(self):
        """ Send shutdown signal and wait until _go completes """
        self.report_queue.put_nowait({'op': 'close'})
        self.scheduler_queue.put_nowait({'op': 'close'})
        yield self._shutdown_event.wait()

    def shutdown(self):
        """ Send shutdown signal and wait until scheduler terminates """
        self.report_queue.put_nowait({'op': 'close'})
        self.scheduler_queue.put_nowait({'op': 'close'})
#......... part of the code omitted here .........
Author: cowlicks, Project: distributed, Lines: 103, Source: executor.py

Example 12: Scheduler

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Scheduler(object):
    def __init__(self, center, delete_batch_time=1, loop=None,
            resource_interval=1, resource_log_size=1000):
        self.scheduler_queues = [Queue()]
        self.report_queues = []
        self.delete_queue = Queue()
        self.status = None
        self.coroutines = []

        self.center = coerce_to_rpc(center)

        self.dask = dict()
        self.dependencies = dict()
        self.dependents = dict()
        self.generation = 0
        self.has_what = defaultdict(set)
        self.held_data = set()
        self.in_play = set()
        self.keyorder = dict()
        self.nbytes = dict()
        self.ncores = dict()
        self.nannies = dict()
        self.processing = dict()
        self.restrictions = dict()
        self.stacks = dict()
        self.waiting = dict()
        self.waiting_data = dict()
        self.who_has = defaultdict(set)

        self.exceptions = dict()
        self.tracebacks = dict()
        self.exceptions_blame = dict()
        self.resource_logs = dict()

        self.loop = loop or IOLoop.current()

        self.delete_batch_time = delete_batch_time
        self.resource_interval = resource_interval
        self.resource_log_size = resource_log_size

        self.plugins = []

        self.handlers = {'update-graph': self.update_graph,
                         'update-data': self.update_data,
                         'missing-data': self.mark_missing_data,
                         'task-missing-data': self.mark_missing_data,
                         'worker-failed': self.mark_worker_missing,
                         'release-held-data': self.release_held_data,
                         'restart': self._restart}

    def put(self, msg):
        return self.scheduler_queues[0].put_nowait(msg)

    @property
    def report_queue(self):
        return self.report_queues[0]

    @gen.coroutine
    def _sync_center(self):
        self.ncores, self.has_what, self.who_has, self.nannies = yield [
                self.center.ncores(),
                self.center.has_what(),
                self.center.who_has(),
                self.center.nannies()]

        self._nanny_coroutines = []
        for (ip, wport), nport in self.nannies.items():
            if not nport:
                continue
            if (ip, nport) not in self.resource_logs:
                self.resource_logs[(ip, nport)] = deque(maxlen=self.resource_log_size)

            self._nanny_coroutines.append(self._nanny_listen(ip, nport))

    def start(self, start_queues=True):
        collections = [self.dask, self.dependencies, self.dependents,
                self.waiting, self.waiting_data, self.in_play, self.keyorder,
                self.nbytes, self.processing, self.restrictions]
        for collection in collections:
            collection.clear()

        self.processing = {addr: set() for addr in self.ncores}
        self.stacks = {addr: list() for addr in self.ncores}

        self.worker_queues = {addr: Queue() for addr in self.ncores}

        with ignoring(AttributeError):
            self._delete_coroutine.cancel()
        with ignoring(AttributeError):
            for c in self._worker_coroutines:
                c.cancel()

        self._delete_coroutine = self.delete()
        self._worker_coroutines = [self.worker(w) for w in self.ncores]

        self.heal_state()

        if start_queues:
            self.handle_queues(self.scheduler_queues[0], None)

#......... part of the code omitted here .........
Author: aterrel, Project: distributed, Lines: 103, Source: scheduler.py

Example 13: Application

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]

#......... part of the code omitted here .........
                    node=node,
                    sender=self.local_node)

        # Connect to the node
        try:
            stream = yield self.tcpclient.connect(host, port)
        except StreamClosedError:
            LOGGER.error('Unable to connect from {} to {} (pinging host)'.format(self.local_node.node_id, node.node_id))
            raise Return(False)

        try:
            # Send the ping
            LOGGER.debug('Sending {!r} to {!r}'.format(ping.MESSAGE_TYPE, node))
            yield self.send_message(stream, ping)

            # Wait for an ACK message in response
            LOGGER.debug('Getting next message from {}:{}'.format(host, port))
            message = yield self._get_next_message(stream)
            if message is None:
                raise Return(False)

            ack = Ack(**message)
            LOGGER.debug('Received {!r} from {!r} (response to {!r})'.format(ack.MESSAGE_TYPE,
                                                                             node.node_id,
                                                                             ping.MESSAGE_TYPE))

            # Check that the ACK sequence number matches the PING sequence number
            if ack.seqno == ping.seqno:
                LOGGER.debug('Sequence number matches. Node {} looks good to {}!'.format(node.node_id,
                                                                                          self.local_node.node_id))
                # Process the gossip messages tacked onto the ACK message's payload
                for message in ack.payload:
                    try:
                        self.gossip_inbox.put_nowait(message)
                    except QueueFull:
                        LOGGER.error('Unable to add {} message from {} to gossip inbox'.format(message.MESSAGE_TYPE,
                                                                                               node.node_id))
                # mark the node as ALIVE in self.nodes
                self.mark_alive(node)

                # Send gossip that this node is alive
                self.queue_gossip_send(
                    Alive(node=node, sender=self.local_node)
                )

                raise Return(True)
            else:
                raise Return(False)
        finally:
            stream.close()

    @coroutine
    def ack(self, stream, seqno):
        payload = []
        for _ in xrange(ACK_PAYLOAD_SIZE):
            try:
                gossip = self.gossip_outbox.get_nowait()
                payload.append(gossip)
            except QueueEmpty:
                break

        ack = Ack(seqno=seqno, payload=payload)
        LOGGER.debug('Trying to send ack: {}'.format(ack))
        try:
            yield stream.write(ack.to_msgpack)
        except StreamClosedError:
Author: jefffm, Project: swimpy, Lines: 70, Source: app.py

Example 14: Worker

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class Worker(Server):
    """ Worker Node

    Workers perform two functions:

    1.  **Serve data** from a local dictionary
    2.  **Perform computation** on that data and on data from peers

    Additionally workers keep a Center informed of their data and use that
    Center to gather data from other workers when necessary to perform a
    computation.

    You can start a worker with the ``dworker`` command line application::

        $ dworker scheduler-ip:port

    **State**

    * **data:** ``{key: object}``:
        Dictionary mapping keys to actual values
    * **active:** ``{key}``:
        Set of keys currently under computation
    * **ncores:** ``int``:
        Number of cores used by this worker process
    * **executor:** ``concurrent.futures.ThreadPoolExecutor``:
        Executor used to perform computation
    * **local_dir:** ``path``:
        Path on local machine to store temporary files
    * **center:** ``rpc``:
        Location of center or scheduler.  See ``.ip/.port`` attributes.
    * **name:** ``string``:
        Alias
    * **services:** ``{str: Server}``:
        Auxiliary web servers running on this worker
    * **service_ports:** ``{str: port}``:

    Examples
    --------

    Create centers and workers in Python:

    >>> from distributed import Center, Worker
    >>> c = Center('192.168.0.100', 8787)  # doctest: +SKIP
    >>> w = Worker(c.ip, c.port)  # doctest: +SKIP
    >>> yield w._start(port=8788)  # doctest: +SKIP

    Or use the command line::

       $ dcenter
       Start center at 127.0.0.1:8787

       $ dworker 127.0.0.1:8787
       Start worker at:            127.0.0.1:8788
       Registered with center at:  127.0.0.1:8787

    See Also
    --------
    distributed.center.Center:
    """

    def __init__(self, center_ip, center_port, ip=None, ncores=None,
                 loop=None, local_dir=None, services=None, service_ports=None,
                 name=None, **kwargs):
        self.ip = ip or get_ip()
        self._port = 0
        self.ncores = ncores or _ncores
        self.data = dict()
        self.loop = loop or IOLoop.current()
        self.status = None
        self.local_dir = local_dir or tempfile.mkdtemp(prefix='worker-')
        self.executor = ThreadPoolExecutor(self.ncores)
        self.thread_tokens = Queue()  # https://github.com/tornadoweb/tornado/issues/1595#issuecomment-198551572
        for i in range(self.ncores):
            self.thread_tokens.put_nowait(i)
        self.center = rpc(ip=center_ip, port=center_port)
        self.active = set()
        self.name = name

        if not os.path.exists(self.local_dir):
            os.mkdir(self.local_dir)

        if self.local_dir not in sys.path:
            sys.path.insert(0, self.local_dir)

        self.services = {}
        self.service_ports = service_ports or {}
        for k, v in (services or {}).items():
            if isinstance(k, tuple):
                k, port = k
            else:
                port = 0

            self.services[k] = v(self)
            self.services[k].listen(port)
            self.service_ports[k] = self.services[k].port

        handlers = {'compute': self.compute,
                    'gather': self.gather,
                    'compute-stream': self.compute_stream,
                    'run': self.run,
#......... part of the code omitted here .........
Author: coobas, Project: distributed, Lines: 103, Source: worker.py
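The Worker constructor above only seeds thread_tokens with one token per core; a hedged sketch of how such a token pool is typically consumed (take a token before handing work to the executor, put_nowait it back when done) could look like this. The names do_blocking_work and run_task are illustrative and not part of the distributed Worker API.

from concurrent.futures import ThreadPoolExecutor

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

NCORES = 2
tokens = Queue()
for i in range(NCORES):
    tokens.put_nowait(i)          # seed one token per core, as Worker does

executor = ThreadPoolExecutor(NCORES)


def do_blocking_work(x):
    return x * x


@gen.coroutine
def run_task(x):
    token = yield tokens.get()    # wait until a core token is available
    try:
        result = yield executor.submit(do_blocking_work, x)
    finally:
        tokens.put_nowait(token)  # hand the token back for other tasks
    raise gen.Return(result)


@gen.coroutine
def main():
    results = yield [run_task(i) for i in range(5)]
    print(results)


IOLoop.current().run_sync(main)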

Example 15: SQSDrain

# Required import: from tornado.queues import Queue [as alias]
# Or: from tornado.queues.Queue import put_nowait [as alias]
class SQSDrain(object):
    """Implementation of IDrain that writes to an AWS SQS queue.
    """

    def __init__(self, logger, loop, sqs_client,
                 metric_prefix='emitter'):
        self.emitter = sqs_client
        self.logger = logger
        self.loop = loop
        self.metric_prefix = metric_prefix
        self.output_error = Event()
        self.state = RUNNING
        self.sender_tag = 'sender:%s.%s' % (self.__class__.__module__,
                                            self.__class__.__name__)
        self._send_queue = Queue()
        self._should_flush_queue = Event()
        self._flush_handle = None
        self.loop.spawn_callback(self._onSend)

    @gen.coroutine
    def _flush_send_batch(self, batch_size):
        send_batch = [
            self._send_queue.get_nowait()
            for pos in range(min(batch_size, self.emitter.max_messages))
        ]
        try:
            response = yield self.emitter.send_message_batch(*send_batch)
        except SQSError as err:
            self.logger.exception('Error encountered flushing data to SQS: %s',
                                  err)
            self.output_error.set()
            for msg in send_batch:
                self._send_queue.put_nowait(msg)
        else:
            if response.Failed:
                self.output_error.set()
                for req in response.Failed:
                    self.logger.error('Message failed to send: %s', req.Id)
                    self._send_queue.put_nowait(req)

    @gen.coroutine
    def _onSend(self):
        respawn = True
        while respawn:
            qsize = self._send_queue.qsize()
            # This will keep flushing until clear,
            # including items that show up in between flushes
            while qsize > 0:
                try:
                    yield self._flush_send_batch(qsize)
                except Exception as err:
                    self.logger.exception(err)
                    self.output_error.set()
                qsize = self._send_queue.qsize()
            # We've cleared the backlog, remove any possible future flush
            if self._flush_handle:
                self.loop.remove_timeout(self._flush_handle)
                self._flush_handle = None
            self._should_flush_queue.clear()
            yield self._should_flush_queue.wait()

    @gen.coroutine
    def close(self, timeout=None):
        self.state = CLOSING
        yield self._send_queue.join(timeout)

    def emit_nowait(self, msg):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
            raise QueueFull()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        self.logger.debug("Drain emitting")
        self._send_queue.put_nowait(msg)

    @gen.coroutine
    def emit(self, msg, timeout=None):
        if self._send_queue.qsize() >= self.emitter.max_messages:
            # Signal flush
            self._should_flush_queue.set()
        elif self._flush_handle is None:
            # Ensure we flush messages at least by MAX_TIMEOUT
            self._flush_handle = self.loop.add_timeout(
                MAX_TIMEOUT,
                lambda: self._should_flush_queue.set(),
            )
        yield self._send_queue.put(msg, timeout)
Author: CrowdStrike, Project: cs.eyrie, Lines: 94, Source: drain.py
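SQSDrain.emit_nowait above refuses new work with QueueFull once a batch-size threshold is reached and leaves draining to a background coroutine. Reduced to a standalone sketch, with MAX_MESSAGES and handle_batch as illustrative placeholders rather than parts of cs.eyrie, the pattern looks roughly like this.

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.locks import Event
from tornado.queues import Queue, QueueFull

MAX_MESSAGES = 10
send_queue = Queue()
should_flush = Event()


def emit_nowait(msg):
    if send_queue.qsize() >= MAX_MESSAGES:
        should_flush.set()            # wake the background flusher
        raise QueueFull()             # callers must retry or back off
    send_queue.put_nowait(msg)


@gen.coroutine
def flusher(handle_batch):
    while True:
        yield should_flush.wait()
        batch = [send_queue.get_nowait() for _ in range(send_queue.qsize())]
        yield handle_batch(batch)
        should_flush.clear()


@gen.coroutine
def demo():
    @gen.coroutine
    def handle_batch(batch):
        print('flushing %d messages' % len(batch))

    IOLoop.current().spawn_callback(flusher, handle_batch)
    for i in range(25):
        try:
            emit_nowait(i)
        except QueueFull:
            yield send_queue.put(i)   # fall back to the blocking path
    yield gen.sleep(0.1)              # give the flusher a chance to drain


IOLoop.current().run_sync(demo)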


注:本文中的tornado.queues.Queue.put_nowait方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。