

Python Queue.full Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.Queue.full, drawn from open-source code. If you are wondering what Queue.full does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the multiprocessing.Queue class it belongs to.


Below are 15 code examples of Queue.full, ordered by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
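
Before the examples, here is a minimal, self-contained sketch (not drawn from any project below) of what Queue.full reports. One caveat applies throughout this page: the standard library documents full() and empty() on multiprocessing.Queue as unreliable under concurrent access, since another process can change the queue between the check and whatever action follows it.

from multiprocessing import Queue

def demo():
    q = Queue(2)        # bounded queue with capacity 2
    q.put("a")
    q.put("b")
    print(q.full())     # True: capacity reached
    q.get()
    print(q.full())     # False: one slot is free again

if __name__ == '__main__':
    demo()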

Example 1: test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {"task": "tests.tasks.index_incrementer", "args": [], "kwargs": {"message": 23}}
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # When I Process messages
    worker = ProcessWorker(internal_queue)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Developer: spulec, Project: PyQS, Lines: 32, Source: test_worker.py

Example 2: prefetch_queue

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class prefetch_queue(object):

    def __init__(self, batch_size, data_dir, phase=True):
        self.producer = tool.av_generator(batch_size, data_dir, train=phase)
        self.queue = Queue(5)

    def produce(self):
        if not self.queue.full():
            self.queue.put(self.producer.next())
        else:
            pass

    def samples(self):
        if not self.queue.empty():
            item = self.queue.get()
            return item

    def ini_queue(self):
        while not self.queue.full():
            print '....'
            self.produce()

    def next(self):
        self.produce()
        return self.samples()
Developer: saicoco, Project: _practice, Lines: 27, Source: batch_queue.py
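
A side note on the produce() pattern in Example 2: checking full() and then calling put() is not atomic, so with several producers a put() that follows a not-full check can still block. A minimal sketch of the non-blocking alternative, with illustrative names that are not part of the project above:

from multiprocessing import Queue
from queue import Full  # multiprocessing's put_nowait raises queue.Full

def try_produce(q, item):
    # Attempt to enqueue without blocking; report whether the item fit.
    try:
        q.put_nowait(item)
        return True
    except Full:
        return False  # queue was full; the caller may drop or retry

if __name__ == '__main__':
    q = Queue(1)
    print(try_produce(q, 'batch-0'))  # True
    print(try_produce(q, 'batch-1'))  # False: capacity 1 already used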

Example 3: test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto3.client('sqs', region_name='us-east-1')
    queue_url = conn.create_queue(QueueName="tester")['QueueUrl']

    # Build the SQS Message
    message = {
        'Body': json.dumps({
            'task': 'tests.tasks.index_incrementer',
            'args': [],
            'kwargs': {
                'message': 23,
            },
        }),
        "ReceiptHandle": "receipt-1234",
    }

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Developer: spulec, Project: PyQS, Lines: 59, Source: test_worker.py

Example 4: start_with_return

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
    def start_with_return(self):
        """
            Start listening for tasks.
            Parallelism is implemented with a multiprocessing.Queue, so the
            parallel execution is asynchronous; buff serves as the process pool.
        """
        self.socket.bind('tcp://{}:{}'.format(self.host, self.port))
        buff_size = 3
        buff = Queue(buff_size)  
        count = 0
        while True:
            #todo: decide availability by checking CPU idleness instead of buff.full()
            #querry = self.socket.recv_pyobj()
            recv_dict = self.socket.recv_pyobj() 
            header = recv_dict['header']
            
            if header == 'querry':
                # Reply according to the current buffer occupancy.
                if not buff.full():
                    self.socket.send_pyobj('available')
                else:
                    self.socket.send_pyobj('not available')
                    self.is_available = False
                    time.sleep(1)
            elif header == 'run':
                runnable_string = recv_dict['runnable_string']
                runnable = pickle.loads(runnable_string)
                args = recv_dict['args']
                kwargs = recv_dict['kwargs']

                count += 1
                if self.info > 0:
                    print 'put in queue count %d port=%d'%(count, self.port)
                p = Process(target=run_one_with_return, args=(runnable, buff, args, kwargs))
                p.start()
                # join() blocks until the child process finishes, then the
                # result is collected from the buffer queue.
                p.join()
                res = buff.get()
                self.socket.send_pyobj(res)
            elif header == 'stop':
                self.socket.send_pyobj('stop server')
                break
Developer: chiwhalee, Project: brokest, Lines: 51, Source: brokest.py
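
The helper run_one_with_return is referenced in Example 4 but not shown in the excerpt. A plausible minimal implementation, offered as an assumption rather than the actual brokest source, runs the callable in the child process and hands its result back through the shared queue:

def run_one_with_return(runnable, buff, args, kwargs):
    # Assumed signature: execute the task and push its result into the
    # bounded queue, where the parent process collects it with buff.get().
    result = runnable(*args, **kwargs)
    buff.put(result)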

Example 5: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def main():
    run_queue = Queue(50)
    
    processes = [Process(target=run_task, args=(run_queue,))
                     for i in range(20)]
    for p in processes:
        p.daemon = True
        p.start()
    
    signal.signal(signal.SIGTERM, SignalTERM)

    db = getDb()
    channel_list = db.query('SELECT * FROM channel WHERE id = 833 ORDER BY id').fetchall()
    channel_len = len(channel_list)
    i = 1
    for channel in channel_list:
        while run_queue.full():
            # If the task queue is full, wait before enqueuing more work
            time.sleep(3)
        config = channel['config']
        try:
            config = json.loads(config)
            run_queue.put({'code': channel['code'], 'config': config, 'type': channel['type'], 'province': channel['province'], 'city': channel['city']})
            print 'Queue: %s/%s' % (i, channel_len)
            i = i + 1
        except:
            print 'error'
            print config

    for i in range(20):
        run_queue.put("STOP")
    for p in processes:
        p.join()
Developer: I0T, Project: EPGT, Lines: 35, Source: bot-zjws.py

Example 6: run

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
  def run(self, count=Arbitrary.TEST_COUNT):
    print('start test.')

    if self.process > 1:
      from multiprocessing import Queue
    else:
      from queue import Queue

    runner = PropRunner(count)
    queue = Queue(maxsize=len(PyQCheck.TEST_STEP))
    if self.process > 1:
      # multi process
      PyQWorker().set([
        Process(
          target=runner.run, args=(test,), kwargs={"queue": queue})
        for test in PyQCheck.TEST_STEP
      ]).start(self.process)
    else:
      # linear
      for test in PyQCheck.TEST_STEP:
        runner.run(test, queue=queue)

    length = len(PyQCheck.TEST_STEP)
    while True:
      if queue.full():
        print('finish.')
        for i in range(length):
          self.results.append(queue.get())
        return self
Developer: futoase, Project: PyQCheck, Lines: 30, Source: __init__.py
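
The loop in Example 6 polls queue.full() until all results have arrived, which works only because the queue's maxsize equals the number of tests. A common alternative, sketched here with illustrative names rather than PyQCheck's API, is to block on get() once per expected result:

from multiprocessing import Process, Queue

def work(n, q):
    q.put(n * n)

if __name__ == '__main__':
    q = Queue()
    procs = [Process(target=work, args=(i, q)) for i in range(4)]
    for p in procs:
        p.start()
    results = [q.get() for _ in range(4)]  # blocks until every worker reports
    for p in procs:
        p.join()
    print(sorted(results))  # [0, 1, 4, 9]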

Example 7: generate_routes

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def generate_routes():
    logging.info('Start of route generation')
    number_of_processes = 8
    route_queue = Queue(maxsize=20000)
    sql = 'SELECT id, start_point, end_point FROM de_sim_routes'
    threading.Thread(target=_queue_feeder, args=(sql, route_queue, 20000, number_of_processes)).start()

    with connection.get_connection() as conn:
        cur = conn.cursor()
        cur.execute('SELECT COUNT(id) FROM de_sim_routes')  # execute 1.7 Secs
        rec = cur.fetchone()
        counter = Counter(rec[0])

    while not route_queue.full():
        time.sleep(0.2)

    start = time.time()
    processes = []
    for i in range(number_of_processes):
        p = ProcessRouteCalculation(route_queue, counter)
        processes.append(p)
        processes[-1].start()

    for p in processes:
        p.join()

    end = time.time()
    logging.info('Runtime Route Generation: %s', (end - start))
Developer: boerngen-schmidt, Project: commuter-simulation, Lines: 30, Source: route_calculation.py

Example 8: GameManager

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class GameManager():
    event_handlers = []

    def __init__(self):
        self.arduino = None
        self.event_thread = None
        self.event_q = Queue(1)
        atexit.register(self.cleanup)  # register the method itself, not the result of calling it
        self.register_event_handlers()

    def register_event_handlers(self):
        self.event_handlers = [SoundEventHandler()]  # only one handler for now

    def connect_to_arduino(self, serial_addr):
        try:
            self.arduino = serial.Serial(serial_addr,
                                         baudrate=9600,
                                         bytesize=serial.EIGHTBITS,
                                         parity=serial.PARITY_NONE,
                                         stopbits=serial.STOPBITS_ONE)

        except Exception as e:
            print("error opening serial connection")
            raise e

    def poll_serial(self, q):
        while True:
            ascii_line = self.arduino.readline().decode('ascii', errors='replace')
            if "e:" in ascii_line:
                q.put(ascii_line[2:])

        
    def run(self):
        self.event_thread = Process(target=self.poll_serial, args=(self.event_q,))
        self.event_thread.start()

        while True:
            if self.event_q.full():
                event = self.event_q.get()
                for h in self.event_handlers:
                    h.process_event(event)


    def cleanup(self):
        try:
            if self.event_thread:
                self.event_thread.terminate()

            if self.arduino:
                self.arduino.close()

            for h in self.event_handlers:
                h.cleanup()

        except Exception as e:
            print("Cleanup exception: " + str(e))
Developer: wangdrew, Project: e-foosball, Lines: 58, Source: GameManager.py

Example 9: DataSource

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class DataSource(threading.Thread):
  """
  A class to represent a stream of data that supplies a 2D array of data.

  The class has a few standard methods which are intended to be over-written,
  all of which are methods from the super class, Thread. It is intended to be
  run in parallel to simulation, and it has a Queue to safely access the data
  that is created from the class.

  This class is intended to be fed to the Converter class.
  """

  def __init__(self, queueMax = 1):
    """
    Init the DataSource as a Thread with a Queue.

    Keyword Arguments:
    queueMax -- The maximum number of data points to be held within the Queue
      (default 1)
    """

    self.queueMax = queueMax
    self.queue = Queue(queueMax)

    self.error = False
    threading.Thread.__init__(self)

  def add(self, data):
    """
    Add a point of data to the Queue.

    Make sure the Queue is not full before adding the new data.

    Keyword Arguments:
    data -- The data to be added to the Queue
    """

    if not self.queue.full(): self.queue.put(data)

  def get(self):
    """
    Get a point of data from the Queue.

    Make sure the Queue is not empty before removing a data point from the
    Queue.

    Returns:
      The least recent data-point in the Queue. (first in, first out)
    """

    if not self.queue.empty(): return self.queue.get()
Developer: alexoneill, Project: TerraMotus, Lines: 53, Source: sources.py

Example 10: browse

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def browse():
    wordlist = list(db.items())
    size = len(wordlist)
    totalcount = 0.0
    right = 0.0
    lookup = Queue(maxsize=int(prefetch))
    answer = Queue(maxsize=int(prefetch))
    lookuper = Process(target=answers, args=(lookup, answer))
    lookuper.daemon = True
    lookuper.start()

    if size <= 1:
        print("There must be at least two words needed in the list.")
        exit()
    i = 0
    while(1):
        while(not lookup.full()):
            k = wordlist[i][0]
            i = i + 1
            if i >= size:
                i = 0
            k = k.lower()
            lookup.put(k)
        result = answer.get()
        k = result.key.text
        if k not in db:
            continue
        print(result.show())
        speak(result)

        try:
            word = input("(d) Delete, (enter) Continue: ")
            if word == "d":
                del db[k]
                wordlist = list(db.items())
                size = len(wordlist)
                if size <= 1:
                    print("There must be at least two words "
                          "needed in the list.")
                    exit()
        except KeyboardInterrupt:
            result(right, totalcount)
Developer: iblis17, Project: zdict, Lines: 44, Source: ydict.py

Example 11: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class SafeQueue:
    """ Safe Queue implementation is a wrapper around standard multiprocessing
        queue. Implements safe queuing and dequeueing. """

    def __init__(self, size=10):
        self._queue = Queue(size)
        self._lock = Lock()

    def queue(self, inp):
        self._lock.acquire()
        if self._queue.full():
            self._queue.get()
        self._queue.put_nowait(inp)
        self._lock.release()

    def dequeue(self):
        self._lock.acquire()
        item = None
        if not self._queue.empty():
            item = self._queue.get_nowait()
        self._lock.release()
        return item
Developer: antiface, Project: spectral, Lines: 24, Source: queue.py
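
A brief usage sketch for the SafeQueue above, with hypothetical values, showing its drop-oldest behavior once the buffer fills:

if __name__ == '__main__':
    sq = SafeQueue(size=3)
    for i in range(5):
        sq.queue(i)       # once full, the oldest item is evicted to make room
    print(sq.dequeue())   # -> 2: items 0 and 1 were dropped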

Example 12: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def main():
    f = file('Forenotice.CSV', 'r')
    run_queue = Queue(50)
   
    for i in range(20):
        p = Process(target=run_task, args=(run_queue,))
        p.daemon = True
        p.start()
   
    i = 104
    while 1:
        while run_queue.full():
            time.sleep(3)
        
        line = f.readline()
        if not line:
            break
        id, name = line.split(',', 1)
        run_queue.put({'id': id, 'name': name}) 
        print "Queue: %s / %s" % (id, '73380')

    while 1:
        time.sleep(3)
Developer: I0T, Project: EPGT, Lines: 25, Source: wiki_bot.py

Example 13: WorkerProcess

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
                 drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter  = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter  = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        self.process = Process(name=self.name, target=self.run)
        self.process.start()

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level = SLOW_ONLY

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        rospy.init_node(WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname),
                        anonymous=False)

        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty(): sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()

    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            #self.queue.put((topic, data, current_time or datetime.now()))
            self.queue.put((topic, data, rospy.get_time()))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
#......... part of the code omitted here .........
Developer: Jailander, Project: mongodb_store, Lines: 103, Source: mongodb_log.py

Example 14: multiprocess_progress

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
def multiprocess_progress(data, functor, finished, data_size, early_clip=None):
    from multiprocessing import Process, current_process, Queue
    import os    # used for cpu_count, CPU affinity, and sched_yield below
    import time  # used for the cooperative sleeps in the collection loop

    num_procs = os.cpu_count()-1

    def worker(wnum, input_queue, output_queue):
        os.sched_setaffinity(0, [wnum])
        while True:
            try:
                idx, value = input_queue.get(block=False)
                if value == 'STOP':
                    break
                output_queue.put((idx, functor(value)))
            except:
                pass
            os.sched_yield()

    task_queue = Queue(2*num_procs)
    done_queue = Queue(2*num_procs)

    # Launch workers.
    print('Running {} workers ...'.format(num_procs))
    processes = []
    for i in range(num_procs):
        processes.append(Process(target = worker,
            args = (i, task_queue, done_queue),
            name = 'worker {}'.format(i),
            daemon = True))
        processes[-1].start()

    # Push input data, and check for output data.
    num_sent = 0
    num_done = 0
    num_clipped = 0
    iterator = iter(data)
    perc = 0

    def print_progress(msg=None):
        msg_str = ''
        if msg is not None:
            msg_str = '['+msg+']'
        print('\033[2K\r{} sent, {} done, {} clipped, {} total ({} %) {}'.format(num_sent, 
            num_done, num_clipped, data_size, perc, msg_str), end='')

    while num_done < data_size:
        print_progress('sending work')

        while num_sent < data_size and not task_queue.full():
            nextval = next(iterator)
            clipped = False
            if early_clip is not None:
                clipped, clip_result = early_clip(num_sent, nextval)
                if clipped:
                    finished(num_sent, clip_result)
                    num_clipped += 1
                    num_done += 1

            if not clipped:
                task_queue.put((num_sent, nextval))

            num_sent += 1
            os.sched_yield()

        while True:
            try:
                i, result = done_queue.get(block=False)
                finished(i, result)
                num_done += 1
                perc = int(num_done / data_size * 100)
                print_progress('collecting results')
            except:
                break
            time.sleep(0)

        print_progress()
        time.sleep(0)

    # Terminate workers.
    for i in range(num_procs):
        task_queue.put((-1, 'STOP'))

    for p in processes:
        p.join()

    print('\n ... done')
Developer: cessen, Project: psychopath, Lines: 87, Source: generate_spectra_tables.py

Example 15: TopicLogger

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import full [as alias]
class TopicLogger(MongoDBLogger):
    """
    This class implements a generic topic logger.
    It simply dumps all messages received from the topic into the MongoDB.
    """

    def __init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name, max_queuesize=QUEUE_MAXSIZE):
        MongoDBLogger.__init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name)
        self.worker_out_counter = Counter()
        self.worker_in_counter = Counter()
        self.worker_drop_counter = Counter()
        self.queue = Queue(max_queuesize)

    def _init(self):
        """
        This method initializes this process.
        It initializes the connection to the MongoDB and subscribes to the topic.
        """
        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level = SLOW_ONLY

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()
        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self._enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                rospy.logwarn("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
            except rospy.ROSInitException:
                rospy.logwarn("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
                self.subscriber = None

    def run(self):
        """
        This method does the actual logging.
        """
        self._init()
        rospy.logdebug("ACTIVE: %s" % self.name)
        # Process the messages
        while not self.is_quit():
            self._dequeue()

        # we must make sure to clear the queue before exiting,
        # or the parent thread might deadlock otherwise
        self.subscriber.unregister()
        self.subscriber = None
        while not self.queue.empty():
            self.queue.get_nowait()
        rospy.logdebug("STOPPED: %s" % self.name)

    def shutdown(self):
        self.queue.put("shutdown")
        super(TopicLogger, self).shutdown()

    def _sanitize_value(self, v):
        if isinstance(v, rospy.Message):
            return self._message_to_dict(v)
        elif isinstance(v, genpy.rostime.Time):
            t = datetime.utcfromtimestamp(v.secs)
            return t + timedelta(microseconds=v.nsecs / 1000.)
        elif isinstance(v, genpy.rostime.Duration):
            return v.secs + v.nsecs / 1000000000.
        elif isinstance(v, list):
            return [self._sanitize_value(t) for t in v]
        else:
            return v

    def _message_to_dict(self, val):
        d = {}
        for f in val.__slots__:
            d[f] = self._sanitize_value(getattr(val, f))
        return d

    def qsize(self):
        return self.queue.qsize()

    def _enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            self.queue.put((topic, data, rospy.get_time()))
            self.worker_in_counter.increment()

    def _dequeue(self):
        try:
            t = self.queue.get(True)
        except IOError:
            self.quit = True
            return
#......... part of the code omitted here .........
Developer: Hansa064, Project: ros-mongodb_log, Lines: 103, Source: mongodb_log.py


Note: the multiprocessing.Queue.full method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.