

Python Queue.qsize Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.Queue.qsize, gathered from open-source code. If you are wondering what Queue.qsize does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, multiprocessing.Queue.


Fifteen code examples of Queue.qsize are shown below, ordered by popularity.
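
Before diving into the collected examples (most of which are Python 2 era code), here is a minimal, self-contained Python 3 sketch of the method's behaviour and its main portability caveat:

from multiprocessing import Queue

if __name__ == '__main__':
    q = Queue()
    q.put('a')
    q.put('b')
    try:
        # The count is approximate: other processes may put or get
        # items at any moment, so treat it as advisory only.
        print(q.qsize())  # typically prints 2
    except NotImplementedError:
        # Raised where sem_getvalue() is unavailable, notably macOS.
        print('qsize() is not implemented on this platform')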

Example 1: run

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            Worker(listen_queue, pid).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Author: ftdysa, Project: skyline, Lines: 27, Source: horizon-agent.py
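
Examples 1 and 3 probe qsize() once at startup so the agent knows whether queue-size logging will work later. The same guard can be factored into a small helper; the sketch below is illustrative (safe_qsize is a hypothetical name, not part of skyline):

from multiprocessing import Queue

def safe_qsize(q):
    """Return q.qsize(), or None on platforms (e.g. macOS) where it raises NotImplementedError."""
    try:
        return q.qsize()
    except NotImplementedError:
        return None

if __name__ == '__main__':
    q = Queue()
    q.put('metric')
    size = safe_qsize(q)
    print('queue size: %s' % ('unavailable' if size is None else size))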

Example 2: get_gif_url_list

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def get_gif_url_list(timeout):
    gif_url_list_q = Queue()
    for i in range(1,6):
        process_list = []
        # `data` and `fetch_gif_url` are defined elsewhere in worker.py
        for j in range(1 + (i - 1) * 10,1 + i * 10):
            process_list.append(Process(target=fetch_gif_url,args=(data,j,gif_url_list_q)))
        for j in range(1 + (i - 1) * 10,1 + i * 10):
            process_list[j - (i - 1) * 10 - 1].start()
            # print 'Task {0} started...'.format(j)

        time_sum = 0
        while 1:
            # iterate over a copy: removing items from a list while
            # iterating over it directly would skip elements
            for process in process_list[:]:
                if not process.is_alive():
                    process_list.remove(process)
            if len(process_list) == 0:
                break
            elif time_sum == timeout:
                for process in process_list[:]:
                    process.terminate()
                    print '{0} hung and was killed...'.format(process.name)
                    process_list.remove(process)
                break
            else:
                time_sum += 1
                sleep(1)
                print str(time_sum) + ' seconds'
    print gif_url_list_q.qsize(), 'results'
    gif_url_list = []
    for i in range(gif_url_list_q.qsize()):
        gif_url_list.append(gif_url_list_q.get())
    return gif_url_list
Author: cobain, Project: iSpider, Lines: 36, Source: worker.py
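
Example 2 drains the queue with range(gif_url_list_q.qsize()), which is safe here only because every producer has already exited or been terminated; while producers are alive, qsize() is approximate. The same drain-by-qsize pattern appears in Examples 10, 11, 13 and 14. A more defensive drain, sketched in Python 3, loops on get() with a timeout and also works where qsize() is unavailable:

import queue  # for the Empty exception
from multiprocessing import Queue

def drain(q, timeout=0.1):
    """Collect everything currently retrievable from q without consulting qsize()."""
    items = []
    while True:
        try:
            items.append(q.get(timeout=timeout))
        except queue.Empty:
            return items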

Example 3: run

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        # If we're not using Oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Author: B-Rich, Project: skyline, Lines: 36, Source: horizon-agent.py

Example 4: parse_input_files

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def parse_input_files( input_files, num_threads, genomes, min_coverage, min_proportion ):
    from multiprocessing import Process, Queue
    #from queue import Queue
    from time import sleep
    input_q = Queue()
    output_q = Queue()
    for input_file in input_files:
        input_q.put( input_file )
    if num_threads > input_q.qsize():
        num_threads = input_q.qsize()
    sleep( 1 )
    thread_list = []
    for current_thread in range( num_threads ):
        input_q.put( None )
        current_thread = Process( target=manage_input_thread, args=[ genomes.reference(), min_coverage, min_proportion, input_q, output_q ] )
        current_thread.start()
        #manage_input_thread( genomes.reference(), min_coverage, min_proportion, input_q, output_q )
        thread_list.append( current_thread )
    sleep( 1 )
    while num_threads > 0:
        new_genome = output_q.get()
        if new_genome is None:
            num_threads -= 1
        elif isinstance( new_genome, str ):
            genomes.add_failed_genome( new_genome )
        else:
            genomes.add_genome( new_genome )
    sleep( 1 )
    for current_thread in thread_list:
        current_thread.join()
Author: nate-d-olson, Project: NASP, Lines: 32, Source: vcf_to_matrix.py
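
Note that Example 4 uses qsize() only to clamp the worker count to the number of queued files; shutdown is handled with sentinels instead: one None per worker on the input queue, and one None posted back by each worker so the reader knows when all of them have finished. A stripped-down Python 3 sketch of that protocol (the worker body is illustrative):

from multiprocessing import Process, Queue

def worker(input_q, output_q):
    while True:
        item = input_q.get()
        if item is None:           # sentinel: no more work for this worker
            output_q.put(None)     # tell the reader one worker has exited
            return
        output_q.put(item * 2)     # stand-in for real work

if __name__ == '__main__':
    input_q, output_q = Queue(), Queue()
    num_workers = 3
    for item in range(10):
        input_q.put(item)
    for _ in range(num_workers):
        input_q.put(None)          # one sentinel per worker
    procs = [Process(target=worker, args=(input_q, output_q)) for _ in range(num_workers)]
    for p in procs:
        p.start()
    remaining, results = num_workers, []
    while remaining:               # read until every worker has signalled
        item = output_q.get()
        if item is None:
            remaining -= 1
        else:
            results.append(item)
    for p in procs:
        p.join()
    print(sorted(results))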

Example 5: test_transfert_queue

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def test_transfert_queue():
    t1 = "testTopic"
    topic = Topics()
    q = Queue()

    topic.process(t1,123)
    topic.process(t1,456)
    topic.process(t1,789)

    assert q.empty()

    topic.transfer(t1,q)

    assert q.qsize() > 0

    assert q.get() == [0, 123]
    assert q.get() == [1, 456]
    assert q.get() == [2, 789]

    topic.process(t1,111)
    topic.process(t1,222)

    assert q.qsize() > 0

    assert q.get() == [3, 111]
    assert q.get() == [4, 222]
Author: Overdrivr, Project: pytelemetrycli, Lines: 28, Source: test_topics.py
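
The qsize() assertions in this test are dependable because, in CPython's implementation, multiprocessing.Queue backs qsize() with a semaphore that put() acquires synchronously: the count is updated before put() returns, even though the payload itself is shipped later by a background feeder thread. A small Python 3 illustration of that split (behaviour as observed in CPython; on macOS the qsize() call would raise NotImplementedError instead):

from multiprocessing import Queue

q = Queue()
q.put('payload')
# The semaphore-backed counter already reflects the put()...
print(q.qsize())  # 1
# ...but the payload travels via a feeder thread, so even this
# same-process get() may block briefly until it reaches the pipe.
print(q.get())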

Example 6: test_cached

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def test_cached(broker):
    broker.purge_queue()
    broker.cache.clear()
    group = 'cache_test'
    # queue the tests
    task_id = async('math.copysign', 1, -1, cached=True, broker=broker)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.copysign', 1, -1, cached=True, broker=broker, group=group)
    async('math.popysign', 1, -1, cached=True, broker=broker, group=group)
    iter_id = async_iter('math.floor', [i for i in range(10)], cached=True)
    # test wait on cache
    # test wait timeout
    assert result(task_id, wait=10, cached=True) is None
    assert fetch(task_id, wait=10, cached=True) is None
    assert result_group(group, wait=10, cached=True) is None
    assert result_group(group, count=2, wait=10, cached=True) is None
    assert fetch_group(group, wait=10, cached=True) is None
    assert fetch_group(group, count=2, wait=10, cached=True) is None
    # run a single inline cluster
    task_count = 17
    assert broker.queue_size() == task_count
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    for i in range(task_count):
        pusher(task_queue, stop_event, broker=broker)
    assert broker.queue_size() == 0
    assert task_queue.qsize() == task_count
    task_queue.put('STOP')
    result_queue = Queue()
    worker(task_queue, result_queue, Value('f', -1))
    assert result_queue.qsize() == task_count
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # assert results
    assert result(task_id, wait=500, cached=True) == -1
    assert fetch(task_id, wait=500, cached=True).result == -1
    # make sure it's not in the db backend
    assert fetch(task_id) is None
    # assert group
    assert count_group(group, cached=True) == 6
    assert count_group(group, cached=True, failures=True) == 1
    assert result_group(group, cached=True) == [-1, -1, -1, -1, -1]
    assert len(result_group(group, cached=True, failures=True)) == 6
    assert len(fetch_group(group, cached=True)) == 6
    assert len(fetch_group(group, cached=True, failures=False)) == 5
    delete_group(group, cached=True)
    assert count_group(group, cached=True) is None
    delete_cached(task_id)
    assert result(task_id, cached=True) is None
    assert fetch(task_id, cached=True) is None
    # iter cached
    assert result(iter_id) is None
    assert result(iter_id, cached=True) is not None
    broker.cache.clear()
Author: schnitzelbub, Project: django-q, Lines: 61, Source: test_cached.py

Example 7: evaluate_paths

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
    def evaluate_paths(self, paths, P_t, current_step, horizon=-1):
        jobs = collections.deque()
        eval_queue = Queue()
        evaluated_paths = []
        paths_tmp = [(i, paths[i]) for i in xrange(len(paths)) if len(paths[i][0]) != 0]
        for i in xrange(len(paths_tmp)):
            logging.info("PathEvaluator: Evaluate path " + str(i))
            p = Process(target=self.evaluate, args=(paths_tmp[i][0],
                                                    paths_tmp[i][1],
                                                    P_t,
                                                    current_step,
                                                    horizon,
                                                    self.robot,
                                                    eval_queue,))
            p.start()
            jobs.append(p)

            if len(jobs) == self.num_cores - 1 or i == len(paths_tmp) - 1:
                if i == len(paths_tmp) - 1 and not len(jobs) == self.num_cores - 1:
                    while not eval_queue.qsize() == len(paths_tmp) % (self.num_cores - 1):
                        time.sleep(0.00001)
                else:
                    while not eval_queue.qsize() == self.num_cores - 1:
                        time.sleep(0.00001)
                jobs.clear()
                q_size = eval_queue.qsize()
                for j in xrange(q_size):
                    eval_elem = eval_queue.get()
                    if eval_elem != None:
                        evaluated_paths.append(eval_elem)
                    else:
                        print "Path could not be evaluated"
        path_rewards = [evaluated_paths[i][1] for i in xrange(len(evaluated_paths))]
        if len(path_rewards) == 0:
            return (None, None, None, None, None, None, None, None, None)
        best_index = evaluated_paths[0][0]
        best_path = evaluated_paths[0][2]
        best_objective = path_rewards[0]
        s_covariances = evaluated_paths[0][4]
        deviation_covariances = evaluated_paths[0][5]
        estimated_deviation_covariances = evaluated_paths[0][6]
        for i in xrange(1, len(path_rewards)):
            if path_rewards[i] > best_objective:
                best_index = evaluated_paths[i][0]
                best_objective = path_rewards[i]
                best_path = evaluated_paths[i][2]
                s_covariances = evaluated_paths[i][4]
                deviation_covariances = evaluated_paths[i][5]
                estimated_deviation_covariances = evaluated_paths[i][6]
        logging.info("PathEvaluator: Objective value for the best path is " + str(best_objective))
        return (best_index,
                best_path[0],
                best_path[1],
                best_path[2],
                best_path[3],
                best_objective,
                s_covariances,
                deviation_covariances,
                estimated_deviation_covariances)
Author: hoergems, Project: LQG, Lines: 61, Source: path_evaluator.py

Example 8: TestMultiUserProcess

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def TestMultiUserProcess(filenames,threadnums,T):

    allnums=sum(threadnums)
    eachnums=allnums/Global.CPUs
    actLists=[]
    for i in range(len(threadnums),1,-1):
        threadnums[i-1]=sum(threadnums[0:i])
    for i in range(0,len(filenames)):
        actList=[]
        for line in open(filenames[i]):
            words=line.split("||")
            temp=[]
            for w in words:
                temp.append(w)
            actList.append(temp)
        actLists.append(actList)
    del Global.result[:]
    Q=Queue(Global.CPUs)
    lock = multiprocessing.Lock()
    for i in range(0,Global.CPUs):
        p = multiprocessing.Process(target = MultiUserTest, args = (Q,lock,eachnums,threadnums,actLists,T,i,))
        p.start()
    st=time.time()
    while True:
        time.sleep(1)
        if Q.qsize()==Global.CPUs:
            break
    for i in range(0,Global.CPUs):
        Global.result.extend(Q.get())  # the original called Q.get(i), passing i as the `block` flag by mistake
Author: 386, Project: httploadtest, Lines: 31, Source: httpload.py
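
Example 8 polls Q.qsize() once per second to detect that every worker process has posted its result, which breaks on platforms where qsize() raises NotImplementedError. When the number of producers is known, one blocking get() per producer gives the same guarantee without polling. A Python 3 sketch:

from multiprocessing import Process, Queue

def worker(q, n):
    q.put(n * n)  # stand-in for the real per-process result

if __name__ == '__main__':
    cpus = 4
    q = Queue()
    procs = [Process(target=worker, args=(q, i)) for i in range(cpus)]
    for p in procs:
        p.start()
    results = [q.get() for _ in range(cpus)]  # one blocking get() per producer; no polling
    for p in procs:
        p.join()  # join after draining, so a full pipe cannot deadlock a child
    print(results)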

Example 9: run

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
    def run(self):
        program_start = time.time()

        wQ = Queue()
        wQ.cancel_join_thread()
        wP = []

        for i in range(tp.threads):
            p = Process(target=tp.process_file_write, args=(i,wQ,))
            wP.append(p)
            p.start()

        # 10000 files enter here
        sys.stdout.write(Fore.GREEN + "Reading from stdin...")
        input_files = sys.stdin.readlines()
        for input_file in input_files:
            wQ.put(input_file.rstrip())
        print Fore.GREEN + "\rDone reading from stdin. I found %d files." % (wQ.qsize())

        for _ in wP:
            wQ.put(None)

        for p in wP:
            p.join()

        program_stop = time.time()
        print Fore.GREEN + "Processed %d files in %f seconds (%fs avg)" % (len(input_files), program_stop-program_start, (program_stop-program_start)/len(input_files))
Author: naiaden, Project: VacCor, Lines: 29, Source: pronew.py

Example 10: crunch

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def crunch(file_name, ext_type, handler, pool_size=4, queue_size=40,
           limit=None):

    print 'Crunching file: %s, limit: %s' % (file_name, limit)

    q = JoinableQueue(queue_size)
    q_feats = Queue()

    pool = Pool(pool_size, wrap_handler(handler), ((q, q_feats),))

    with file_reader(file_name) as reader:
        idx = 0
        for entry in reader:

            if (entry.pathname.find(ext_type) != -1):
                text = [b for b in entry.get_blocks()]
                key = entry.pathname.split('/')[-1].split('.')[0]

                q.put((key, text), True)
                idx += 1

                print 'Processing:', entry.pathname, idx

                if limit and idx >= limit:
                    print 'Reached the limit'
                    break

        q.close()
        q.join()
        pool.close()

    result = []
    for i in range(q_feats.qsize()):
        result.append(q_feats.get())
    return result
Author: Anhmike, Project: kaggle-malware-classification, Lines: 37, Source: sevenz_cruncher.py
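
Example 10 leans on JoinableQueue: q.join() returns only once every item put on the queue has been matched by a task_done() call, which the pool workers created through wrap_handler are presumably responsible for. A minimal Python 3 sketch of that handshake (names are illustrative, not the cruncher's real API):

from multiprocessing import JoinableQueue, Process

def consumer(q):
    while True:
        item = q.get()
        try:
            pass  # stand-in for real processing of item
        finally:
            q.task_done()  # exactly one call per get(), or q.join() never returns

if __name__ == '__main__':
    q = JoinableQueue(maxsize=40)
    p = Process(target=consumer, args=(q,), daemon=True)  # daemon: discarded once main exits
    p.start()
    for i in range(100):
        q.put(i)
    q.join()  # blocks until task_done() has been called for every item
    print('all items processed')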

Example 11: execute

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
    def execute(self):
        """
        Execute every forest in the collection, activating their networks.
        Along the way, collect data about the best fitness function.
        """
        process_list = []
        forests_queue = Queue(self.power)
        iterational = 0
        print '| |-starting evaluation, training and validation'
        for one_forest in self._forests:
            process_list.append(
                Process(target=main_async_method,
                        args=(forests_queue, copy(one_forest.to_portal()), iterational, self.settings)))
            iterational += 1
        for proc in process_list:
            proc.start()
        for proc in process_list:
            proc.join()
        for smth in range(forests_queue.qsize()):
            tmp = forests_queue.get()
            self._forests[tmp['place']].fitness = tmp['fitness']
        fitness_summ = sum(map(lambda forest: forest.fitness, self._forests))
        fss = map(lambda x: x.fitness, self._forests)
        print 'avg = ', str(sum(fss) / len(fss)), 'max = ', max(fss)
        self.roulet = map(lambda x: x.fitness / fitness_summ, self._forests)
Author: UIR-workigteam, Project: UIR, Lines: 27, Source: ForestsCollection.py

Example 12: test_recycle

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def test_recycle(r):
    # set up the Sentinel
    list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    start_event = Event()
    stop_event = Event()
    # override settings
    Conf.RECYCLE = 2
    Conf.WORKERS = 1
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, list_key=list_key)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    Conf.SAVE_LIMIT = 1
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    r.delete(list_key)
Author: sebasmagri, Project: django-q, Lines: 37, Source: test_cluster.py

Example 13: MyOVBox

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
class MyOVBox(OVBox):
	def __init__(self):
		OVBox.__init__(self)
		self.p = None
		self.q = None
	
	def f(self, queue):
		while True:
			queue.put('hello')
			time.sleep(1)
	
	def initialize(self):
		print "process initialize!"
		self.q = Queue()
		self.p = Process(target=self.f, args=(self.q,))
		self.p.start()
	
	def process(self):
		for i in range(self.q.qsize()):
			print self.q.get()
	
	def uninitialize(self):
		print "process uninitialize!"
		self.p.terminate()
		self.p.join()				
Author: Akanoa, Project: PRI, Lines: 27, Source: python-multiprocessing.py

Example 14: runTest

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def runTest(num_proc, query_num):
    print "Simulate concurrent user: {}".format(num_proc)

    jobs = []
    queue = Queue()
    try:
        for num in range(num_proc):      
            p = Process(target=runCmdWorker, args=(query_num, queue))
            jobs.append(p)
            p.start()

        for j in jobs:
            j.join()
            if j.exitcode != 0:
                print '%s.exitcode = %s' % (j.name, j.exitcode)

    finally:
        total_avg = 0.0
        for i in range(queue.qsize()):
            result = queue.get()
            proc_avg = float(result.split(':')[1])
            total_avg = total_avg + proc_avg
            #print result

        print "average execution time: {:.8f}s".format(total_avg/num_proc)
        print 
Author: bigobject-inc, Project: StressTest, Lines: 29, Source: Stress.py

Example 15: test_recycle

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import qsize [as alias]
def test_recycle(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    # override settings
    monkeypatch.setattr(Conf, 'RECYCLE', 2)
    monkeypatch.setattr(Conf, 'WORKERS', 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, broker=broker)
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    monkeypatch.setattr(Conf, 'SAVE_LIMIT', 1)
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Author: fle-internal, Project: django-q, Lines: 37, Source: test_cluster.py


Note: The multiprocessing.Queue.qsize examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors. Consult the corresponding project's License before distributing or reusing the code. Do not reproduce this article without permission.