

Python Queue.get Method Code Examples

This article collects typical usage examples of the Queue.get method in Python. If you are wondering what Queue.get does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of the Queue module that this method belongs to.


The following shows 15 code examples of the Queue.get method, sorted by popularity by default.
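
Before the project excerpts, here is a minimal, self-contained sketch of the basic Queue.get patterns they all build on; it is not taken from any of the projects below. Note that Queue (capital Q) is the Python 2 name of the standard-library module, renamed to queue in Python 3, which is why some excerpts catch Queue.Empty and others queue.Empty.

import threading
import time
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

q = queue_module.Queue()

def producer():
    for i in range(3):
        q.put('item-%d' % i)
        time.sleep(0.1)

t = threading.Thread(target=producer)
t.start()

# Blocking get: waits until an item is available.
print(q.get())

# Blocking get with a timeout: raises Empty if nothing arrives in time.
try:
    print(q.get(block=True, timeout=1.0))
except queue_module.Empty:
    print('timed out')

# Non-blocking get: raises Empty immediately when the queue is empty.
try:
    print(q.get(False))
except queue_module.Empty:
    print('queue is empty right now')

t.join()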

Example 1: make_web

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
from flask import Flask, render_template, Response   # imports used by this snippet
import cv2

def make_web(queue):
    app = Flask(__name__)

    @app.route('/')
    def index():
        return render_template('index.html')

    def gen():
        while True:
            frame = queue.get()
            _, frame = cv2.imencode('.JPEG', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame.tostring() + b'\r\n')

    @app.route('/video_feed')
    def video_feed():
        return Response(gen(),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    try:
        app.run(host='0.0.0.0', port=8889)
    except:
        print('unable to open port') 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 25, Source: rl_data.py
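
As a hedged usage sketch for make_web: one background thread keeps pushing frames into the queue while make_web serves them over HTTP. The dummy frame producer and queue size below are assumptions for illustration and are not part of rl_data.py; running it also requires Flask, OpenCV, numpy, and an index.html template.

import threading
import numpy as np
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

frame_queue = queue_module.Queue(maxsize=10)

def produce_frames():
    # Stand-in producer: push black 320x240 BGR frames forever.
    while True:
        frame_queue.put(np.zeros((240, 320, 3), dtype=np.uint8))

producer = threading.Thread(target=produce_frames)
producer.daemon = True
producer.start()

make_web(frame_queue)   # blocks; the stream is served at /video_feed on port 8889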

Example 2: get

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def get(self, poll_interval=5):
    while True:
      try:
        # Using Queue.get() with a timeout is really expensive - Python uses
        # busy waiting that wakes up the process every 50ms - so we switch
        # to a more efficient polling method if there is no activity for
        # <fast_poll_time> seconds.
        if time.time() - self.last_item_time < self.fast_poll_time:
          message = Queue.Queue.get(self, block=True, timeout=poll_interval)
        else:
          time.sleep(poll_interval)
          message = Queue.Queue.get(self, block=False)
        break

      except Queue.Empty:
        self.callback()

    self.last_item_time = time.time()
    return message 
Developer: soarpenguin, Project: python-scripts, Lines: 21, Source: grr_utils.py
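
For context, here is a minimal skeleton of the kind of Queue.Queue subclass this method could sit on. The attribute names callback, fast_poll_time, and last_item_time come from the excerpt itself; the class name and default values are assumptions, not taken from grr_utils.py.

import time
import Queue   # Python 2 module name; 'queue' in Python 3

class PollingQueue(Queue.Queue):
    def __init__(self, callback=lambda: None, fast_poll_time=60, maxsize=0):
        Queue.Queue.__init__(self, maxsize)   # Queue.Queue is an old-style class in Python 2
        self.callback = callback              # invoked whenever a wait comes up empty
        self.fast_poll_time = fast_poll_time  # seconds of recent activity that justify fast polling
        self.last_item_time = time.time()

    # The get() method shown in Example 2 would be defined here.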

Example 3: worker

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def worker(queue, user, size, outdir, total):
    while True:
        try:
            photo = queue.get(False)
        except Queue.Empty:
            break
        media_url = photo[1]
        urllib3_download(media_url, size, outdir)
        with lock:
            global downloaded
            downloaded += 1
            d = {
                'media_url': os.path.basename(media_url),
                'user': user,
                'index': downloaded + 1 if downloaded < total else total,
                'total': total,
            }
            progress = PROGRESS_FORMATTER % d
            sys.stdout.write('\r%s' % progress)
            sys.stdout.flush() 
Developer: shichao-an, Project: twitter-photos, Lines: 22, Source: parallel.py
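
The worker above relies on a fill-first, drain-with-get(False) pattern: the queue is fully populated before the threads start, so Queue.Empty cleanly signals that the work is done. Below is a stripped-down, runnable sketch of that pattern; the URLs and thread count are placeholders, not values from parallel.py.

import threading
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

q = queue_module.Queue()
for url in ['http://example.com/a.jpg', 'http://example.com/b.jpg', 'http://example.com/c.jpg']:
    q.put((None, url))   # Example 3 only reads index 1 (the media URL) of each tuple

def drain():
    while True:
        try:
            item = q.get(False)   # non-blocking; raises Empty once the queue is exhausted
        except queue_module.Empty:
            break
        print('processing %s' % item[1])

threads = [threading.Thread(target=drain) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()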

Example 4: _worker_manager_loop

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id):
    if pin_memory:
        torch.cuda.set_device(device_id)

    while True:
        try:
            r = in_queue.get()
        except Exception:
            if done_event.is_set():
                return
            raise
        if r is None:
            break
        if isinstance(r[1], ExceptionWrapper):
            out_queue.put(r)
            continue
        idx, batch = r
        try:
            if pin_memory:
                batch = pin_memory_batch(batch)
        except Exception:
            out_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            out_queue.put((idx, batch)) 
Developer: namisan, Project: mt-dnn, Lines: 26, Source: dataloader.py
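
Two Queue.get conventions in this loop are worth noting: a None item acts as a shutdown sentinel, and exceptions are forwarded through the output queue rather than raised in the worker. Here is a minimal sentinel-shutdown sketch without the torch-specific parts; the names and the doubling step are placeholders, not code from dataloader.py.

import threading
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

in_queue = queue_module.Queue()
out_queue = queue_module.Queue()

def manager_loop():
    while True:
        r = in_queue.get()
        if r is None:                     # sentinel: the producer is telling us to stop
            break
        idx, batch = r
        out_queue.put((idx, batch * 2))   # stand-in for the real per-batch work

t = threading.Thread(target=manager_loop)
t.start()
in_queue.put((0, 21))
in_queue.put(None)                        # shut the worker down
t.join()
print(out_queue.get())                    # (0, 42)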

Example 5: _set_SIGCHLD_handler

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def _set_SIGCHLD_handler():
    # Windows doesn't support SIGCHLD handler
    if sys.platform == 'win32':
        return
    # can't set signal in child threads
    if not isinstance(threading.current_thread(), threading._MainThread):
        return
    global _SIGCHLD_handler_set
    if _SIGCHLD_handler_set:
        return
    previous_handler = signal.getsignal(signal.SIGCHLD)
    if not callable(previous_handler):
        previous_handler = None

    def handler(signum, frame):
        # This following call uses `waitid` with WNOHANG from C side. Therefore,
        # Python can still get and update the process status successfully.
        _error_if_any_worker_fails()
        if previous_handler is not None:
            previous_handler(signum, frame)

    signal.signal(signal.SIGCHLD, handler)
    _SIGCHLD_handler_set = True 
Developer: namisan, Project: mt-dnn, Lines: 25, Source: dataloader.py

Example 6: act

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def act(self, action):
        if self.nthreads > 1:
            new = self.pool.map(env_step, zip(self.env, action))
        else:
            new = [env.step(act) for env, act in zip(self.env, action)]

        reward = np.asarray([i[1] for i in new], dtype=np.float32)
        done = np.asarray([i[2] for i in new], dtype=np.float32)

        channels = self.state_.shape[1]//self.input_length
        state = np.zeros_like(self.state_)
        state[:,:-channels,:,:] = self.state_[:,channels:,:,:]
        for i, (ob, env) in enumerate(zip(new, self.env)):
            if ob[2]:
                state[i,-channels:,:,:] = env.reset().transpose((2,0,1))
            else:
                state[i,-channels:,:,:] = ob[0].transpose((2,0,1))
        self.state_ = state

        if self.web_viz:
            try:
                while self.queue.qsize() > 10:
                    self.queue.get(False)
            except queue.Empty:
                pass
            frame = self.visual()
            self.queue.put(frame)

        return reward, done 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 31, Source: rl_data.py
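
The web_viz branch above keeps the visualization queue shallow: before putting a new frame it discards any backlog beyond about ten items with non-blocking get(False) calls. A standalone sketch of that keep-only-recent pattern follows; the queue contents and threshold are placeholders, not values from rl_data.py.

try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

viz_queue = queue_module.Queue()

def push_latest(frame):
    # Drop old frames so a slow consumer always sees something recent.
    try:
        while viz_queue.qsize() > 10:
            viz_queue.get(False)
    except queue_module.Empty:
        pass
    viz_queue.put(frame)

for i in range(100):
    push_latest('frame-%d' % i)
print(viz_queue.qsize())   # stays around 10-11 instead of growing to 100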

Example 7: get_msg

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def get_msg(self, block=True, timeout=None):
        """ Gets a message if there is one that is ready. """
        if timeout is None:
            # Queue.get(timeout=None) has stupid uninterruptible
            # behavior, so wait for a week instead
            timeout = 604800
        return self._in_queue.get(block, timeout) 
Developer: ktraunmueller, Project: Computable, Lines: 9, Source: channels.py
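
The comment above refers to a Python 2 quirk: a get() with block=True and no timeout acquires a lock in a way that Ctrl-C cannot interrupt, so the code substitutes a very long but finite timeout. A tiny standalone illustration, using a plain Queue instead of the channel's _in_queue:

import Queue

q = Queue.Queue()
q.put('hello')

# A finite timeout keeps the wait interruptible; 604800 seconds is one week,
# which is effectively "forever" for an interactive kernel channel.
try:
    print(q.get(True, 604800))   # returns 'hello' immediately here
except Queue.Empty:
    print('nothing arrived within a week')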

Example 8: download_worker

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def download_worker():
    while True:
        url = queue.get()
        download_file(url, SAVE_DIR)
        queue.task_done()

# Returns the path of the specified page number 
Developer: benjaminheng, Project: interfacelift-downloader, Lines: 9, Source: interfacelift-downloader.py
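
A possible driver for download_worker, showing how get() pairs with task_done() and join(). The globals the worker relies on (queue, download_file, SAVE_DIR) are recreated here as stand-ins; the real values in interfacelift-downloader differ.

import threading
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

queue = queue_module.Queue()
SAVE_DIR = '/tmp/wallpapers'

def download_file(url, save_dir):
    print('would download %s into %s' % (url, save_dir))   # stand-in for the real download

for url in ['http://example.com/1.jpg', 'http://example.com/2.jpg']:
    queue.put(url)

for _ in range(4):
    t = threading.Thread(target=download_worker)
    t.daemon = True        # daemon workers let the process exit after join()
    t.start()

queue.join()               # returns once task_done() has been called for every put()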

Example 9: worker

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def worker(sess,model_options,model_vars,Queue,CLASS_DICT):
    while True:
        # print 'Queue Size', Queue.qsize()
        try:
            fname = Queue.get()
        except:
            return
        start = time.time()
        file_name_orig = fname.split(' ')[0].split('/')[1].strip()
        file_name = file_name_orig.replace('.avi','.npz')
        class_name = fname.split(' ')[0].split('/')[0].strip().lower()
        class_idx = CLASS_DICT[class_name]
        try:
            frames = np.load(model_options['data_dir']+file_name)['arr_0']
        except:
            print "Couldn't Open: ",model_options['data_dir']+file_name
            Queue.task_done()
            continue

        idx = 0
        if model_options['mode'] == 'train':
            idx = random.randint(0,frames.shape[0]-1)

        frames = frames[idx]
        tmpImg,tmpLab,num_crops = getCrops(sess,model_options,model_vars,frames,np.array((class_idx)))

        if model_options['mode'] == 'train':
            for j in range(num_crops):
                size = model_options['example_size']
                sess.run(model_vars['enqueue_op'],feed_dict={model_vars['images']:tmpImg[j*size:(j+1)*size],
                         model_vars['labels']:tmpLab[j:(j+1)]})
        else:
            sess.run(model_vars['enqueue_op'],feed_dict={model_vars['images']:tmpImg,
                     model_vars['labels']:tmpLab,
                     model_vars['names']:[[file_name_orig]]*num_crops})

        Queue.task_done() 
Developer: amlankar, Project: adascan-public, Lines: 39, Source: datasets.py

Example 10: get

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def get(self, **kwargs):
        """Get an item from the queue. Kwargs are ignored (often used in standard library queue.get calls)"""
        msg = self.queue.get(acknowledge=False)
        if msg is None:
            raise Empty
        return pickle.loads(msg.body) 
Developer: XENON1T, Project: pax, Lines: 8, Source: parallel.py
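
The point of this wrapper is to make a non-standard message queue look like the standard-library one by raising the same Empty exception. Below is a runnable sketch of that shape with a fake in-memory backend; FakeBackend, FakeMessage, and QueueAdapter are inventions for illustration, not pax's actual classes.

import pickle
try:
    from Queue import Empty     # Python 2
except ImportError:
    from queue import Empty     # Python 3

class FakeMessage(object):
    def __init__(self, body):
        self.body = body

class FakeBackend(object):
    """Returns pickled messages, or None when nothing is waiting."""
    def __init__(self, items):
        self._items = [FakeMessage(pickle.dumps(i)) for i in items]
    def get(self, acknowledge=False):
        return self._items.pop(0) if self._items else None

class QueueAdapter(object):
    def __init__(self, backend):
        self.queue = backend
    def get(self, **kwargs):
        """Get an item; raise Empty just like the standard library queue would."""
        msg = self.queue.get(acknowledge=False)
        if msg is None:
            raise Empty
        return pickle.loads(msg.body)

adapter = QueueAdapter(FakeBackend(['a', 'b']))
print(adapter.get())   # 'a'
print(adapter.get())   # 'b'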

Example 11: multiprocess_configuration

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def multiprocess_configuration(n_cpus, pax_id, base_config_kwargs, processing_queue_kwargs, output_queue_kwargs):
    """Yields configuration override dicts for multiprocessing"""
    # Config overrides for child processes
    common_override = dict(pax=dict(autorun=True, show_progress_bar=False),
                           DEFAULT=dict(pax_id=pax_id))

    input_override = dict(pax=dict(plugin_group_names=['input', 'output'],
                                   encoder_plugin=None,
                                   decoder_plugin=None,
                                   output='Queues.PushToQueue'),
                          Queues=dict(**processing_queue_kwargs))

    worker_override = {'pax': dict(input='Queues.PullFromQueue',
                                   output='Queues.PushToQueue',
                                   event_numbers_file=None,
                                   events_to_process=None),
                       # PullFromQueue can't have a timeout in the workers, see #444
                       'Queues.PullFromQueue': dict(timeout_after_sec=float('inf'),
                                                    **processing_queue_kwargs),
                       'Queues.PushToQueue': dict(preserve_ids=True,
                                                  many_to_one=True,
                                                  **output_queue_kwargs)}

    output_override = dict(pax=dict(plugin_group_names=['input', 'output'],
                                    encoder_plugin=None,
                                    decoder_plugin=None,
                                    event_numbers_file=None,
                                    events_to_process=None,
                                    input='Queues.PullFromQueue'),
                           Queues=dict(ordered_pull=True,
                                       **output_queue_kwargs))

    overrides = [('input', input_override)] + [('worker', worker_override)] * n_cpus + [('output', output_override)]

    for worker_type, worker_overide in overrides:
        new_conf = deepcopy(base_config_kwargs)
        new_conf['config_dict'] = combine_configs(new_conf.get('config_dict'),
                                                  common_override,
                                                  worker_overide)
        yield worker_type, new_conf 
Developer: XENON1T, Project: pax, Lines: 42, Source: parallel.py

Example 12: check_local_processes_while_remote_processing

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def check_local_processes_while_remote_processing(running_paxes, crash_fanout, terminate_host_on_crash=False):
    """Check on locally running paxes in running_paxes, returns list of remaining running pax processes.
     - Remove any paxes that have exited normally
     - If a pax has crashed, push a message to the crash fanout to terminate all paxes with the same id
     - Look for crash fanout messages from other processes, and terminate local paxes with the same id
     - terminate_host_on_crash: if True, raise exception in the host process if a pax crash is detected in
       a pax chain we're participating in. Do NOT use in a host process that can host multiple pax chains! We will not
       check the presence of other pax chains and terminate them too!
    """
    p_by_status = group_by_status(running_paxes)
    running_paxes = p_by_status['running']

    # If any of our own paxes crashed, send a message to the crash fanout
    # This will inform everyone connected to the server (including ourselves, on the next iteration)
    for crashed_w in p_by_status['crashed']:
        pax_id = crashed_w.pax_id
        exctype, traceb = get_exception_from_process(p_by_status['crashed'][0])
        print("Pax %s crashed!\nDumping exception traceback:\n\n%s\n\nNotifying crash fanout." % (
            pax_id, format_exception_dump(traceb)
        ))
        crash_fanout.put((pax_id, exctype, traceb))

        running_paxes, _ = terminate_paxes_with_id(running_paxes, pax_id)
        if terminate_host_on_crash:
            raise exctype("Pax %s crashed! Traceback:\n %s" % (pax_id, format_exception_dump(traceb)))

    # If any of the remote paxes crashed, we will learn about it from the crash fanout.
    try:
        pax_id, exctype, traceb = crash_fanout.get()
        print("Remote crash notification for pax %s.\n"
              "Remote exception traceback dump:\n\n%s\n.Terminating paxes with id %s." % (
                pax_id, format_exception_dump(traceb), pax_id))

        running_paxes, n_terminated = terminate_paxes_with_id(running_paxes, pax_id)
        if n_terminated > 0 and terminate_host_on_crash:
            raise exctype("Pax %s crashed! Traceback:\n %s" % (pax_id, format_exception_dump(traceb)))

    except Empty:
        pass

    return running_paxes 
Developer: XENON1T, Project: pax, Lines: 43, Source: parallel.py

Example 13: get_exception_from_process

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def get_exception_from_process(p):
    crdict = p.shared_dict
    try:
        exc_type = eval(crdict.get('exception_type', 'UnknownPropagatedException'),
                        exceptions.__dict__)
    except NameError:
        exc_type = exceptions.UnknownPropagatedException
    traceb = crdict.get('traceback', 'No traceback reported')
    return exc_type, traceb 
Developer: XENON1T, Project: pax, Lines: 11, Source: parallel.py

Example 14: run

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def run(self):
        while self.alive.isSet():
            try:
                # Queue.get with timeout to allow checking self.alive
                cmd = self.cmd_q.get(True, 0.1)
                self.handlers[cmd.type](cmd)
            except Queue.Empty as e:
                continue 
Developer: eliben, Project: code-for-blog, Lines: 10, Source: socketclientthread.py
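
The short 0.1-second timeout above exists only so the loop can periodically re-check self.alive. The same idea in standalone form; the event, queue, and handler below are stand-ins, not code from socketclientthread.py.

import threading
import time
try:
    import Queue as queue_module   # Python 2
except ImportError:
    import queue as queue_module   # Python 3

cmd_q = queue_module.Queue()
alive = threading.Event()
alive.set()

def loop():
    while alive.is_set():
        try:
            cmd = cmd_q.get(True, 0.1)   # wake up roughly 10 times per second to re-check alive
        except queue_module.Empty:
            continue
        print('handling %r' % (cmd,))

worker = threading.Thread(target=loop)
worker.start()
cmd_q.put('ping')
time.sleep(0.3)    # give the loop a moment to handle the command
alive.clear()      # the loop exits at its next alive check, within about 0.1 s
worker.join()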

Example 15: _reduction_thread_fn

# Required module: import Queue [as alias]
# Or: from Queue import get [as alias]
def _reduction_thread_fn(queue, group_id, device_ids, reduction_streams, nccl_streams):

        def _process_batch():
            dev_grad_batch, dev_events, job_event = queue.get()
            dev_coalesced = []
            # Coalesce the tensors on all devices and start a local reduction
            for dev_id, grad_batch, event, stream in zip(device_ids, dev_grad_batch, dev_events, reduction_streams):
                with torch.cuda.device(dev_id), torch.cuda.stream(stream):
                    stream.wait_event(event)
                    coalesced = _flatten_tensors(grad_batch)
                    dev_coalesced.append(coalesced)
            # Wait for all copies to complete before starting the NCCL kernel
            for stream in reduction_streams:
                stream.synchronize()
            nccl.reduce(dev_coalesced, root=0, streams=nccl_streams)

            # From now on we're only going to work on the first device (from device_ids)
            grad_batch = dev_grad_batch[0]
            coalesced = dev_coalesced[0]
            reduce_stream = reduction_streams[0]
            with torch.cuda.stream(reduce_stream):
                reduce_stream.wait_stream(nccl_streams[0])
                coalesced /= dist.get_world_size()
                dist.all_reduce(coalesced, group=group_id)
                for grad, reduced in zip(grad_batch, _unflatten_tensors(coalesced, grad_batch)):
                    grad.copy_(reduced)
            job_event.set()

        with torch.cuda.device(device_ids[0]):
            while True:
                _process_batch()  # just to have a clear scope 
Developer: hwang595, Project: ps_pytorch, Lines: 33, Source: data_parallel_dist.py


Note: The Queue.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and the copyright of the source code belongs to the original authors; consult each project's license before distributing or reusing the code. Please do not reproduce this article without permission.