

Python multiprocessing.Queue Class Code Examples

This article collects typical usage examples of Python's multiprocessing.Queue class. If you have been wondering what the Queue class is for, how to use it, or what real-world Queue code looks like, the curated class code examples below should help.


The sections below present 15 code examples of the Queue class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
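Before the collected examples, here is a minimal, self-contained sketch of the producer/consumer pattern that most of them build on. It uses only the standard multiprocessing API; the worker function, the None sentinel, and the item count are illustrative choices, not taken from any of the projects below.

from multiprocessing import Process, Queue

def worker(q):
    # Consume items until the None sentinel arrives.
    while True:
        item = q.get()
        if item is None:
            break
        print('got: %s' % item)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    for i in range(3):
        q.put(i)    # hand work to the child process
    q.put(None)     # sentinel: tell the worker to stop
    p.join()        # wait for the worker to exit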

Example 1: recoverPRNGState

def recoverPRNGState(cookie,timeMillisEstimate,PRNGMillisEstimate,IPAddr,serverPort,numWorkers,chunkSize):
    global PRNGMillisDelta
    global initalSeek
    
    q = Queue(0)
    i = 0
    
    if PRNGMillisDelta%chunkSize > 0:
        q.put((PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,PRNGMillisEstimate+PRNGMillisDelta,initalSeek))
    
    for i in range(PRNGMillisEstimate,PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,chunkSize):
        q.put((i,i+chunkSize,initalSeek))
        
    desc = []

    seedValue = Value('d', 0)

    # Start worker processes and assign work.                     
    for i in range(numWorkers):
        p = Process(target=recoverPRNGStateWorker, args=(cookie,timeMillisEstimate,q,IPAddr,serverPort,seedValue))
        p.start()
        desc.append(p)
        
    # Wait for the worker processes to finish.
    for p in desc:
        p.join()

    return long(seedValue.value)
Developer ID: votadlos, Project: JavaCG, Code lines: 28, Source: WinstoneSessionCatcher.py

Example 2: main

def main(world_folder, replacement_file_name):
    global replacements
    world = nbt.world.WorldFolder(world_folder)
    logger = configure_logging()
    logger.info("Starting processing of %s", world_folder)
    if not isinstance(world, nbt.world.AnvilWorldFolder):
        logger.error("%s is not an Anvil world", world_folder)
        return 65 # EX_DATAERR
    if replacement_file_name is not None:
        logger.info("Using replacements file: %s", replacement_file_name)
        with open(replacement_file_name, 'r') as replacement_file:
            replacements = json.load(replacement_file)
    # get the list of region files; it is passed into the function that processes each region
    region_files = world.get_regionfiles()
    
    # Parallel
    q = Queue()
    lp = threading.Thread(target=logger_thread, args=[q])
    lp.start()
    p = Pool(initializer=process_init, initargs=[q,replacements], maxtasksperchild=1)
    region_data = p.map(process_region, region_files)
    # Map has finished; close down the logging queue
    q.put(None)
    lp.join()
    
    # Not Parallel
#     region_data = map(process_region, region_files)
    
    # Write output data
    write_block_data(region_data,"output.txt")
    return 0
Developer ID: bot190, Project: MinecraftAnalysis, Code lines: 31, Source: WorldAnalysis.py

Example 3: main

def main():
    arg = parse_args()
    folder = arg.fold
    core = arg.core
    output = arg.out
    start = arg.start
    if start:
        start = start.replace('-', '') + '000000'

    task_queue = Queue()
    result_queue = Queue()

    task_count = create_task(folder, task_queue, start)
    print task_count
    for i in range(core):
        Process(target=worker, args=(task_queue, result_queue)).start()

    #send stop signal
    for i in range(core):
        task_queue.put('STOP')

    #print result
    out_files = {}
    for i in range(task_count):
        actions = result_queue.get()
        user = actions["user"]
        for day in actions["actions"]:
            if day not in out_files:
                out_files[day] = open(os.path.join(output, day), "w")
            out_files[day].write(json.dumps({"user": user, "actions": actions["actions"][day]}) + "\n")
    for day in out_files:
        out_files[day].flush()
        out_files[day].close()
Developer ID: Tskatom, Project: company_market, Code lines: 33, Source: extractDailyInteraction.py

Example 4: send_probe_requests

def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface=interface)

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
Developer ID: BwRy, Project: sentrygun, Code lines: 25, Source: sniffer.py

Example 5: test_report_hash_added_after_send

    def test_report_hash_added_after_send(self, fromConfig, fromOptions, getLogger):
        # Side effect for fromConfig
        def fake_virts(logger, config):
            new_fake_virt = Mock()
            new_fake_virt.config.name = config.name
            return new_fake_virt

        fromConfig.side_effect = fake_virts
        options = Mock()
        options.interval = 0
        options.oneshot = True
        options.print_ = False
        options.log_file = ''
        options.log_dir = ''
        virtwho = VirtWho(self.logger, options, config_dir="/nonexistant")

        def send(report):
            report.state = AbstractVirtReport.STATE_FINISHED
            return True
        virtwho.send = Mock(side_effect=send)
        queue = Queue()
        virtwho.queue = queue
        virtwho.retry_after = 1
        virtwho.configManager.addConfig(self.config)
        virtwho.configManager.addConfig(self.second_config)
        queue.put(self.fake_report)
        queue.put(self.fake_domain_list)
        virtwho.run()

        self.assertEqual(virtwho.send.call_count, 2)
        self.assertEqual(virtwho.last_reports_hash[self.config.name], self.fake_report.hash)
        self.assertEqual(virtwho.last_reports_hash[self.second_config.name], self.fake_domain_list.hash)
Developer ID: mtulio, Project: virt-who, Code lines: 32, Source: test_virtwho.py

Example 6: __init__

class UpDown:

    def __init__(self, down_workers=2, up_workers=2, db=None):
        self.down_workers_num = down_workers
        self.up_workers_num = up_workers
        self.db = db
        self.base_url = "http://eol.jsc.nasa.gov/SearchPhotos/"
        self.down_workers = []
        self.up_workers = []
        self.to_upload = []
        self.q = Queue()

    def down_worker(self, download_url, image_id):
        """
        Download images and set the database after the download was complete.
        """
        down = ImageDownload(self.base_url + download_url)
        down.find_urls()
        if down.dl():
            self.db.update_image_downloaded(image_id, down.file_name)

    def up_worker(self, mission_id):
        """
        Check for images that are downloaded but not uploaded every minute.
        """
        while True:
            self.to_upload = self.db.get_to_upload(mission_id)
            if len(list(self.to_upload)) > 0:
                print "Found a file to upload!\n"
                self.to_upload = list(self.db.get_to_upload(mission_id))
                self.q.put(self.to_upload)
            else:
                print "No files to upload found!\n"
            time.sleep(60)
Developer ID: PlanetHunt, Project: nasaeol, Code lines: 33, Source: UpDown.py

Example 7: test_req_all_open_orders

    def test_req_all_open_orders(self):
        result_queue = Queue()

        class MockClientSocket(ClientSocket):
            def __init__(self):
                ClientSocket.__init__(self)

            def open_order(self, req_id, contract, order):
                result_queue.put(req_id)
                result_queue.put(contract)
                result_queue.put(order)

            def open_order_end(self):
                result_queue.put('open_order_end')

            def order_status(self, req_id, status, filled, remaining,
                             avg_fill_price, perm_id, parent_id,
                             last_fill_price, client_id, why_held):
                result_queue.put(req_id)
                result_queue.put(status)
                result_queue.put(filled)
                result_queue.put(remaining)
                result_queue.put(avg_fill_price)
                result_queue.put(perm_id)
                result_queue.put(parent_id)
                result_queue.put(last_fill_price)
                result_queue.put(client_id)
                result_queue.put(why_held)

        client = MockClientSocket()
        client.connect()
        client.req_all_open_orders()
        while True:
            result = result_queue.get()
            self.assertIsNotNone(result)
            if result == 'open_order_end':
                break
        client.disconnect()
Developer ID: schalekamp, Project: ibapipy, Code lines: 33, Source: client_socket_tests.py

Example 8: test

    def test():

        queue = Queue()

        proc = Process(target=doNothing, args=(queue, ))
        proc.start()

        _logger.info("Started dummy process with PID %d", proc.pid)
        startCodeCheckerServerAttachedToPid(proc.pid)
        time.sleep(3)
        _logger.info("Allowing the dummy process to finish")
        queue.put(1)
        proc.join()

        if utils.isProcessRunning(proc.pid):
            _logger.warning("Dummy process %d was still running", proc.pid)
            proc.terminate()
            time.sleep(1)
            it.assertFalse(utils.isProcessRunning(proc.pid),
                           "Process %d is still running after terminating "
                           "it!" % proc.pid)

        time.sleep(1)
        _logger.info("Server should have died by now")

        with it.assertRaises(requests.ConnectionError):
            requests.post(it._url + '/get_diagnose_info')
Developer ID: suoto, Project: hdlcc, Code lines: 27, Source: test_hdlcc_server.py

Example 9: MultiSegmentWriter

class MultiSegmentWriter(IndexWriter):
    def __init__(self, index, procs=2, **writerargs):
        self.index = index
        self.lock = index.storage.lock(index.indexname + "_LOCK")
        self.tasks = []
        self.postingqueue = Queue()
        #self.resultqueue = Queue()
        
        names = [index._next_segment_name() for _ in xrange(procs)]
        
        self.tasks = [SegmentWritingTask(index.storage, index.indexname,
                                         segname, writerargs, self.postingqueue)
                      for segname in names]
        for task in self.tasks:
            task.start()
        
    def add_document(self, **args):
        self.postingqueue.put(args)
        
    def cancel(self):
        for task in self.tasks:
            task.cancel()
        self.lock.release()
        
    def commit(self):
        procs = len(self.tasks)
        for _ in xrange(procs):
            self.postingqueue.put(None)
        for task in self.tasks:
            print "Joining", task
            task.join()
            self.index.segments.append(task.get_segment())
        self.index.commit()
        self.lock.release()
Developer ID: KeNJiKunG, Project: E-Tipitaka-for-PC, Code lines: 34, Source: multiproc.py

Example 10: f

def f(idx, q, r):
    path = "data%s" % (idx)
    os.makedirs(path)
    while True:
        item = q.get()
        if item.item_type == ITEM_QUIT:
            break

        count = 0
        localQueue = Queue()
        current = item.data
        while True:
            print current
            fo = urlopen(current)
            data = fo.read()
            name = "%s/%s"%(path,count)
            fw = open(name, "w")
            count = count + 1
            fw.write(data)
            fw.close()
            fo.close()
            p = MyHTMLParser()
            try:
                p.feed(data)
            except:
                pass

            for href in p.hrefs:
                print item.data, ": ", href

            try:
                current = localQueue.get_nowait()
            except:
                break
Developer ID: charsyam, Project: pythoncrawl, Code lines: 34, Source: main.py

Example 11: Updater

class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print 'Terminating updater:', self
            self.terminate()

    def receive(self):
        raise NotImplementedError
Developer ID: KN2C, Project: pyroboime, Code lines: 31, Source: updater.py

Example 12: likelihood_mp_simple

def likelihood_mp_simple(seqlens,fss,uon,bon,theta,seqnum,K,ufnum,bfnum,regtype,sigma):
    global _gradient
    grad = numpy.array(fss,copy=True)  # data distribution
    likelihood = numpy.dot(fss,theta)
    que1 = Queue() # for the likelihood output
    que2 = Queue() # for the gradient output
    np = 0
    subprocesses = []
    corenum=multiprocessing.cpu_count()
    #corenum=1
    if corenum>1:
        chunk=seqnum/corenum+1
    else:
        chunk=seqnum
    starti=0
    while starti < (seqnum):
        endi=starti+chunk
        if endi>seqnum:
            endi=seqnum    
        p = Process(target=likelihoodthread_simple, 
            args=(seqlens[starti:endi],uon[starti:endi],bon[starti:endi],theta,K,ufnum,bfnum,que1,que2))
        p.start()
        np+=1
        #print 'delegated %s:%s to subprocess %s' % (starti, endi, np)
        subprocesses.append(p)
        starti += chunk
    for i in range(np):
        likelihood += que1.get()
    for i in range(np):
        grad += que2.get()
    while subprocesses:
        subprocesses.pop().join()
    grad -= regularity_deriv(theta,regtype,sigma)
    _gradient = grad
    return likelihood - regularity(theta,regtype,sigma)
Developer ID: liuminglu19870419, Project: python_crf, Code lines: 35, Source: LinearCRF2.py

Example 13: likelihood_multithread_O

def likelihood_multithread_O(seqlens,fss,uon,bon,theta,seqnum,K,ufnum,bfnum):   # multiprocess version of likelihood (uses Process workers despite the name)
    '''conditional log likelihood log p(Y|X)'''
    likelihood = numpy.dot(fss,theta)
    thetab=theta[0:bfnum]
    thetau=theta[bfnum:]
    que = Queue()
    np = 0
    subprocesses = []
    corenum=multiprocessing.cpu_count()
    #corenum=1
    if corenum>1:
        chunk=seqnum/corenum+1
    else:
        chunk=seqnum
    starti=0
    while starti < (seqnum):
        endi=starti+chunk
        if endi>seqnum:
            endi=seqnum    
        p = Process(target=likelihoodthread, 
           args=(seqlens,uon,bon,thetau,thetab,seqnum,K,ufnum,bfnum,starti,endi,que))
        p.start()
        np+=1
        #print 'delegated %s:%s to subprocess %s' % (starti, endi, np)
        subprocesses.append(p)
        starti += chunk
    for i in range(np):
        likelihood += que.get()
    while subprocesses:
        subprocesses.pop().join()
    return likelihood - regularity(theta)
Developer ID: liuminglu19870419, Project: python_crf, Code lines: 31, Source: LinearCRF2.py

Example 14: BackgroundProcess

class BackgroundProcess(object):
    """A background process that reads batches and stores them in a queue.

    The :meth:`main` method needs to be called in order to start reading
    batches into the queue. Note that this process will run infinitely;
    start it as a :attr:`~multiprocessing.Process.daemon` to make sure it
    will get killed when the main process exits.

    Parameters
    ----------
    data_stream : :class:`.DataStream` or :class:`Transformer`
        The data stream from which to read batches.
    max_batches : int
        The maximum number of batches to store in the queue. If reached,
        the process will block until a batch is popped from the queue.

    """
    def __init__(self, data_stream, max_batches):
        self.data_stream = data_stream
        self.batches = Queue(max_batches)
        self.run_background = True

    def main(self):
        while True:
            iterator = self.data_stream.get_epoch_iterator()
            for batch in iterator:
                self.batches.put(batch)
            self.batches.put(StopIteration)

    def get_next_data(self):
        return self.batches.get()
Developer ID: yk, Project: fuel, Code lines: 31, Source: __init__.py
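The docstring above says to run main() in a daemon process; a hedged usage sketch of that wiring follows (the data_stream object and the max_batches value are assumptions for illustration, not taken from the fuel documentation):

from multiprocessing import Process

# Assumed: data_stream is a picklable fuel DataStream/Transformer instance.
background = BackgroundProcess(data_stream, max_batches=10)
proc = Process(target=background.main)
proc.daemon = True          # ensure the infinite reader dies with the parent
proc.start()

batch = background.get_next_data()   # blocks until a batch is available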

Example 15: JobPool

class JobPool(object):

    """
    Pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=4):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_task, (self.message_queue,))
        atexit.register(self.clear)

    def add_analysis(self, analysis):
        """
        Add analysis to the pool.
        """
        analysis.set_started()
        self.message_queue.put(analysis)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Developer ID: ANSSI-FR, Project: polichombr, Code lines: 26, Source: jobpool.py
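A hedged usage sketch for JobPool (pending_analyses and the analysis objects are assumptions; execute_task and set_started come from the snippet above):

# Assumed: each analysis object provides the set_started() method called
# by add_analysis, and execute_task consumes the shared message queue.
pool = JobPool(max_instances=2)
for analysis in pending_analyses:
    pool.add_analysis(analysis)   # marks it started and enqueues it
# the atexit handler registered in __init__ terminates the pool on exit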


Note: The multiprocessing.Queue class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; refer to each project's License before redistributing or using the code, and do not reproduce this page without permission.