

Python Queue.put Method Code Examples

This article collects typical code examples of the Python multiprocessing.Queue.put method from open-source projects. If you are wondering exactly what Queue.put does or how to use it, the curated examples below should help. You can also browse further usage examples of multiprocessing.Queue, the class this method belongs to.


The following presents 15 code examples of Queue.put, sorted by popularity by default.
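Before the collected examples, a minimal producer/consumer sketch illustrates the basic contract of Queue.put: it enqueues a picklable object that another process later retrieves with Queue.get. (This sketch is illustrative and not drawn from the projects below.)

from multiprocessing import Process, Queue

def producer(q):
    # put() enqueues an object; it blocks if a bounded queue is full
    for i in range(3):
        q.put(i)
    q.put(None)  # sentinel: tell the consumer to stop

def consume(q):
    while True:
        item = q.get()
        if item is None:
            break
        print("got", item)

if __name__ == "__main__":
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    consume(q)
    p.join()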

Example 1: _run_parallel

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
    def _run_parallel(self, processes=2, progress_bar=False):
        """
        Run all matches in parallel.

        Parameters
        ----------

        processes : int
            Number of worker processes to use
        progress_bar : bool
            Whether or not to update the tournament progress bar
        """
        # At first sight, it might seem simpler to use the multiprocessing Pool
        # Class rather than Processes and Queues. However, Pool can only accept
        # target functions which can be pickled and instance methods cannot.
        work_queue = Queue()
        done_queue = Queue()
        workers = self._n_workers(processes=processes)

        chunks = self.match_generator.build_match_chunks()
        for chunk in chunks:
            work_queue.put(chunk)

        self._start_workers(workers, work_queue, done_queue)
        self._process_done_queue(workers, done_queue, progress_bar=progress_bar)

        return True
Developer: paultopia, Project: Axelrod, Lines: 27, Source: tournament.py
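The comment in this example dates from Python 2, where bound instance methods could not be pickled and therefore could not be handed to a Pool; Python 3 can pickle bound methods by name, though the instance itself must still be picklable. A minimal sketch of the usual workaround, a module-level function (the names here are illustrative, not Axelrod's API):

from multiprocessing import Pool

def play_chunk(chunk):
    # A plain module-level function pickles cleanly under any start method.
    return sum(chunk)  # stand-in for playing all matches in a chunk

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        results = pool.map(play_chunk, [[1, 2], [3, 4]])
    print(results)  # [3, 7]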

Example 2: processFiles

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir: working_path = root+"/"+patch_dir
    else: working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''
    

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()                                      # creates shared memory manager object
    results = manager.dict()                                 # Add dictionary to manager, so it can be accessed across processes
    nextid = Queue()                                         # Create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid)                         # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):           # Create one process per logical CPU
        p = Process(target=processData, args=(nextid,results)) # Assign process to processCBR function, passing in the Queue and shared dictionary
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()                                              # For each process, join them back to main, blocking on each one until finished
    
    # write out results
    c = 1
    sets = sorted(results.keys())  # dict.keys() is a view in Python 3, so sort via sorted()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result'+str(c)+'.txt','w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n+"* "+x+'    *\n'+n+results[x]+"\n")
            FINAL.close()     
            c += 1
Developer: talonsensei, Project: Bfx_scripts, Lines: 37, Source: processPatchesv4_Rpy1.py

Example 3: Manager

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
class Manager(Process):
    def __init__(self, wnum=3):
        Process.__init__(self)
        self.s2m = Queue()  # messages the Manager receives from workers and the svr
        self.m2w = Queue()  # messages sent to workers
        self.works = [0] * wnum
        for i in range(wnum):
            self.works[i] = Worker(self.s2m, self.m2w)
            self.works[i].start()

    def stop(self):
        for w in self.works:
            self.m2w.put(None)  # FIXME should call worker.Terminal?

    """
Video Site: bilibili.com
Title:      【BD‧1080P】【高分剧情】鸟人-飞鸟侠 2014【中文字幕】
Type:       Flash video (video/x-flv)
Size:       3410.85 MiB (3576536465 Bytes)

Downloading 【BD‧1080P】【高分剧情】鸟人-飞鸟侠 2014【中文字幕】.flv ...
  0.7% ( 22.2/3410.9MB) [#
    """

    def run(self):
        # reset DB flags
        kuos = get_by_flag(WORK)
        for uo in kuos:
            set_flag(uo.mid, STOP)
        tuos = get_by_flag(WAIT)
        for uo in tuos:
            set_flag(uo.mid, STOP)

        while True:
            msg = self.s2m.get()
            # print("pid=%s, self.s2m.get=%s" % (os.getpid(), repr(msg)))
            who = msg.get("who")
            if who == "worker":
                self.handle_mid(msg["mid"], msg["dat"])
            elif who == "svr":
                # self.m2w.put(msg['mid'])
                self.m2w.put(pick_url(msg["mid"]))
            elif who == "error":
                sys.stderr.write(msg["dat"])  # FIXME
                sys.stderr.write("\n")
            else:
                sys.stderr.write("Unknown msg:\n")
                sys.stderr.write(repr(msg))  # msg is a dict, so stringify it first
                sys.stderr.write("\n")

    def handle_mid(self, mid, dat):
        print(dat)
        if dat.startswith("Process "):
            dd = dat.split()
            act = dd[2].lower()
            print("mid=%s, act=%s" % (mid, act))
            set_flag(mid, act)
        elif dat.startswith("Downloading "):
            print("mid=[%s]" % mid)
            update_filename(mid, dat[12:-5])
Developer: pastebt, Project: you-get-wui, Lines: 62, Source: dwn.py

Example 4: test_report_hash_added_after_send

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
    def test_report_hash_added_after_send(self, fromConfig, fromOptions, getLogger):
        # Side effect for fromConfig
        def fake_virts(logger, config):
            new_fake_virt = Mock()
            new_fake_virt.config.name = config.name
            return new_fake_virt

        fromConfig.side_effect = fake_virts
        options = Mock()
        options.interval = 0
        options.oneshot = True
        options.print_ = False
        options.log_file = ''
        options.log_dir = ''
        virtwho = VirtWho(self.logger, options, config_dir="/nonexistant")

        def send(report):
            report.state = AbstractVirtReport.STATE_FINISHED
            return True
        virtwho.send = Mock(side_effect=send)
        queue = Queue()
        virtwho.queue = queue
        virtwho.retry_after = 1
        virtwho.configManager.addConfig(self.config)
        virtwho.configManager.addConfig(self.second_config)
        queue.put(self.fake_report)
        queue.put(self.fake_domain_list)
        virtwho.run()

        self.assertEqual(virtwho.send.call_count, 2)
        self.assertEqual(virtwho.last_reports_hash[self.config.name], self.fake_report.hash)
        self.assertEqual(virtwho.last_reports_hash[self.second_config.name], self.fake_domain_list.hash)
Developer: mtulio, Project: virt-who, Lines: 34, Source: test_virtwho.py

Example 5: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
class UpDown:

    def __init__(self, down_workers=2, up_workers=2, db=None):
        self.down_workers_num = down_workers
        self.up_workers_num = up_workers
        self.db = db
        self.base_url = "http://eol.jsc.nasa.gov/SearchPhotos/"
        self.down_workers = []
        self.up_workers = []
        self.to_upload = []
        self.q = Queue()

    def down_worker(self, download_url, image_id):
        """
        Download images and set the database after the download was complete.
        """
        down = ImageDownload(self.base_url + download_url)
        down.find_urls()
        if(down.dl()):
            self.db.update_image_downloaded(image_id, down.file_name)

    def up_worker(self, mission_id):
        """
        Check every minute for images that are downloaded but not uploaded.
        """
        while True:
            self.to_upload = list(self.db.get_to_upload(mission_id))
            if len(self.to_upload) > 0:
                print("Found a file to upload!\n")
                self.q.put(self.to_upload)
            else:
                print("No files to upload found!\n")
            time.sleep(60)
Developer: PlanetHunt, Project: nasaeol, Lines: 35, Source: UpDown.py

Example 6: ParCalculate

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def ParCalculate(systems,calc,cleanup=True,block=True,prefix="Calc_"):
    '''
    Run calculators in parallel for all systems. 
    Calculators are executed in isolated processes and directories.
    The resulting objects are returned in the list (one per input system).
    '''

    if not isinstance(systems, list):
        sysl = [systems]
    else:
        sysl = systems

    if block :
        iq=Queue(len(sysl)+1)
        oq=Queue(len(sysl)+1)
            
        # Create workers    
        for s in sysl:
            __PCalcProc(iq, oq, calc, prefix=prefix, cleanup=cleanup).start()

        # Put jobs into the queue
        for n,s in enumerate(sysl):
            iq.put([n,s])
            # Protection against too quick insertion
            time.sleep(0.2)
        
        if verbose : 
            print("Workers started:", len(sysl))
        
        # Collect the results
        res=[]
        while len(res)<len(sysl) :
            n,s=oq.get()
            res.append([n,s])
            #print("Got from oq:", n, s.get_volume(), s.get_pressure())
    else :
        # We do not need the multiprocessing complications for non-blocking 
        # workers. We just run all in sequence.
        basedir=os.getcwd()
        res=[]
        for n,s in enumerate(sysl):
            s.set_calculator(copy.deepcopy(calc))
            s.get_calculator().block=block
            place=tempfile.mkdtemp(prefix=prefix, dir=basedir)
            os.chdir(place)
            s.get_calculator().working_dir=place
            #print("Start at :", place)
            if hasattr(calc, 'name') and calc.name=='Siesta':
                s.get_potential_energy()
            else:
                s.get_calculator().calculate(s)
            os.chdir(basedir)
            #print("Submited", s.get_calculator().calc_finished(), os.getcwd())
            # Protection against too quick insertion
            time.sleep(0.2)
            res.append([n,s])
        if verbose : 
            print("Workers started:", len(sysl))
            
    return [r for ns,s in enumerate(sysl) for nr,r in res if nr==ns]  # reorder results to match the order of the input systems
Developer: digideskio, Project: Elastic, Lines: 62, Source: parcalc.py

Example 7: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
class TaskQueue:
    N = 4
    symb = string.ascii_letters + string.digits
    
    def __init__(self):
        self.tasks = Queue()
        self.done = Queue()
        self.results = {}
        self.processes = []
        for i in range(TaskQueue.N):
            self.processes.append(Process(target=self.run_tasks))
            self.processes[-1].start()
        threading.Thread(target=self.collect_results).start()

    def add(self, f, args):
        id = ''.join(random.choice(TaskQueue.symb) for i in range(15))
        self.tasks.put((id, f, args))
        return id

    def get(self, id):
        return self.results.pop(id, '_NotFound_')
            
    def run_tasks(self):
        for id, func, args in iter(self.tasks.get, 'STOP'):
            result = func(*args)
            self.done.put((id, result))

    def collect_results(self):
        for id, r in iter(self.done.get, 'STOP'):
            self.results[id] = r
Developer: vtphan, Project: neo, Lines: 32, Source: neo.py
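A hedged usage sketch for TaskQueue (assuming the module's surrounding imports: Process and Queue from multiprocessing, plus threading, string and random; add_nums is an illustrative task, not part of neo.py):

import time

def add_nums(a, b):
    return a + b

if __name__ == "__main__":
    tq = TaskQueue()
    task_id = tq.add(add_nums, (2, 3))
    time.sleep(0.5)            # give a worker time to run the task
    print(tq.get(task_id))     # 5, or '_NotFound_' if not done yet

Shutting down cleanly would require putting 'STOP' on tasks once per worker process and once on done, since both loops use iter(..., 'STOP') sentinels.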

Example 8: get_citing_papers

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def get_citing_papers(**args):
    # create the queues
    tasks = Queue()
    results = Queue()
    # how many threads are there to be used
    if 'threads' in args:
        threads = args['threads']
    else:
        threads = cpu_count()
    bibcodes = args.get('bibcodes',[])
    # initialize the "harvesters" (each harvester get the citations for a bibcode)
    harvesters = [ MongoCitationListHarvester(tasks, results) for i in range(threads)]
    # start the harvesters
    for b in harvesters:
        b.start()
    # put the bibcodes in the tasks queue
    num_jobs = 0
    for bib in bibcodes:
        tasks.put(bib)
        num_jobs += 1
    # add some 'None' values at the end of the tasks list, to facilitate proper closure
    for i in range(threads):
        tasks.put(None)
    # gather all results into one citation dictionary
    cit_list = []
    while num_jobs:
        data = results.get()
        cit_list += data.get('citations',[])
        num_jobs -= 1
    return cit_list
Developer: aburgm, Project: adsabs, Lines: 32, Source: utils.py

Example 9: start_combo

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def start_combo(argv):
    queue = Queue(10)
    test_input = TestInputParser.get_test_input(argv)
    thread = Thread(target=combo, args=(queue, test_input))
    thread.start()
    time.sleep(24 * 60 * 60)
    queue.put("stop")
Developer: DavidAlphaFox, Project: couchbase, Lines: 9, Source: longevity.py
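Queue(10) above is a bounded queue: put blocks once ten items are waiting, which throttles the combo thread. A quick standalone illustration of the bound (not from the project):

import queue
from multiprocessing import Queue

q = Queue(2)
q.put("a")
q.put("b")
try:
    q.put("c", timeout=0.1)  # queue is full; raises queue.Full after the timeout
except queue.Full:
    print("queue full")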

Example 10: ProcessStuff

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def ProcessStuff(spp_list):
    print('cpu_count() = %d\n' % multiprocessing.cpu_count())
    NUMBER_OF_PROCESSES = multiprocessing.cpu_count()
    TASKS = [(CallMaxent, (spp_list[i],)) for i in range(len(spp_list))]

    # Create queues
    task_queue = Queue()
    done_queue = Queue()

    # Submit tasks
    for task in TASKS:
        task_queue.put(task)

    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    # Get and print results
    print('Unordered results:')
    for i in range(len(TASKS)):
        print('\t', done_queue.get())

    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
Developer: maduhu, Project: QSDM, Lines: 28, Source: Multiprocessing_Train_Models.py

Example 11: solve

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
    def solve(self, problems, **kwargs):
        if type(problems) not in [list, ndarray]:
            problems = [problems]
        assert issubclass(type(problems[0]), _Problem), (
            'ParalelSolver argument is not a _Problem subclass')
        qin = Queue()
        qout = Queue()
        for i, pb in enumerate(problems):
            qin.put((i, pb))

        slaves = []
        for i in range(self.n_jobs):
            slaves += [WorkerSolver(qin, qout, id_w=i,
                                    debug=self.debug,
                                    **self.param)]
            qin.put((None, None))
            slaves[-1].start()

        # Join loop
        N_iter = len(problems)
        self.solutions = [0]*N_iter
        self.scores = [0]*N_iter
        for i in range(N_iter):
            idp, z, s = qout.get()
            self.solutions[idp] = z
            self.scores[idp] = s
            log.progress(name='Solver', iteration=i+1, i_max=N_iter)

        for s in slaves:
            s.join()
        self.problems = problems
        return self.solutions
Developer: tomMoral, Project: Toolbox, Lines: 34, Source: paralel_solver.py

Example 12: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
class TweetManager:
      def __init__(self):
            self.sdb = boto.connect_sdb(setting.AWS_KEY, setting.AWS_SECRET)
            self.__keywords__ = get_filter_keywords(self.sdb)
            self.__cores__ = cpu_count()
            self.tweets_queue = Queue()
            self.db_tweets = self.sdb.get_domain(setting.SDB_DOMAIN)
            self.__buffer__ = ""
      
      def connect_twitter(self):
            self.conn = pycurl.Curl()
            self.conn.setopt(pycurl.POSTFIELDS, urllib.parse.urlencode(self.__keywords__))  # urllib.parse on Python 3
            self.conn.setopt(pycurl.USERPWD, "%s:%s" % (setting.TWITTER_ID, setting.TWITTER_PASSWORD))
            self.conn.setopt(pycurl.URL, setting.JSON_STREAMING_URI)
            print('starting tweet_producer process')
            self.conn.setopt(pycurl.WRITEFUNCTION, lambda data: self.tweet_producer(data))

      
      def tweet_producer(self, tweet):
            self.__buffer__ += tweet
            if tweet.endswith("\r\n") and self.__buffer__.strip():
                  self.tweets_queue.put(self.__buffer__)
                  self.__buffer__ = ""

      def start(self):
            self.connect_twitter()
            print('starting %d tweet_consumer process(s)' % self.__cores__)
            self.consumers = [Process(target=tweet_consumer, args=(i, self.tweets_queue, self.db_tweets,))
                              for i in range(self.__cores__)]
            for c in self.consumers:
                  c.start()
            self.conn.perform()
Developer: ajauhri, Project: TweetAPI, Lines: 34, Source: TweetManager.py

Example 13: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def main():
    q = Queue()

    number_of_processes = 4
    plist = []
    
    for i in range(number_of_processes):
        plist.append(Process(target=f, args=('file_in.txt', i, q, number_of_processes)))

    for p in plist:
        p.start()

    # Caution: joining before draining the queue can deadlock if the
    # workers block while flushing items into the queue's buffer.
    for p in plist:
        p.join()

    q.put(None)

    print('all joined!')
    # Loop through all the elements in the queue and write to file
    with open("file_out.txt", "w") as file_output:
        while True:
            item = q.get()
            print(item)

            if item is None:
                break
            print(item, file=file_output)

    print('Done')
Developer: biwa7636, Project: binwangREPO, Lines: 31, Source: bigfile.py

Example 14: parallel_work

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
def parallel_work(jobs, nr_of_threads):
    """
    Set up the queues, start the worker processes and wait until the job is done
    """
    work_queue = Queue()
    result_queue = Queue()
    result = {}

    for job in jobs:
        work_queue.put(job)

    if nr_of_threads > len(jobs):
        nr_of_threads = len(jobs)

    for i in range(nr_of_threads):
        worker = Process(target=check_plugin, args=(work_queue,result_queue))
        worker.start()

    while len(result) < len(jobs):
        data = result_queue.get()

        if " | " in data[1]:
            (status, output) = data[1].split(" | ")
        else:
            status = "UNKNOWN"
            output = data[1]

        result[data[0]] = {"status": status, "output": output}
        #print "Host " + data[0] + " " + status

    return result
Developer: balle, Project: nagios, Lines: 33, Source: check_parallel.py
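check_plugin itself is not shown here, but the parsing above implies its contract: each worker consumes jobs from work_queue and puts (job, "STATUS | output") tuples on result_queue. An illustrative stub consistent with that contract (hypothetical, not the original plugin runner):

def check_plugin(work_queue, result_queue):
    # empty() is only a heuristic, but all jobs are enqueued
    # before the workers start, so it is safe here
    while not work_queue.empty():
        job = work_queue.get()
        result_queue.put((job, "OK | plugin ran fine"))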

Example 15: JobPool

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import put [as alias]
class JobPool(object):

    """
    Pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=4):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_task, (self.message_queue,))  # execute_task is the per-worker initializer; it consumes message_queue
        atexit.register(self.clear)

    def add_analysis(self, analysis):
        """
        Add analysis to the pool.
        """
        analysis.set_started()
        self.message_queue.put(analysis)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Developer: ANSSI-FR, Project: polichombr, Lines: 28, Source: jobpool.py
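Note the constructor above: Pool's second and third arguments are initializer and initargs, so every worker starts inside execute_task with the shared message_queue and presumably loops forever consuming analyses. A minimal standalone sketch of that pattern (this execute_task is an illustrative stand-in, not Polichombr's implementation):

import time
from multiprocessing import Pool, Queue

task_queue = Queue()

def execute_task(queue):
    # Pool initializer: runs once per worker and never returns,
    # turning each pool worker into a queue consumer.
    while True:
        print("processing", queue.get())

if __name__ == "__main__":
    pool = Pool(2, execute_task, (task_queue,))
    task_queue.put("analysis-1")
    task_queue.put("analysis-2")
    time.sleep(1)      # let the workers drain the queue
    pool.terminate()   # initializers never return, so terminate instead of close
    pool.join()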


Note: The multiprocessing.Queue.put examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.