

Python JoinableQueue.get_nowait Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.JoinableQueue.get_nowait. If you are wondering what JoinableQueue.get_nowait does and how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, multiprocessing.JoinableQueue.


Four code examples of the JoinableQueue.get_nowait method are shown below, ordered by popularity.
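Before diving into the examples, it may help to see the core pattern in isolation: get_nowait returns immediately and raises queue.Empty instead of blocking, so it is usually wrapped in a drain loop and, on a JoinableQueue, each successful get is paired with task_done. Below is a minimal, self-contained sketch (Python 3; all names are invented for illustration):

from multiprocessing import JoinableQueue
from queue import Empty  # get_nowait raises queue.Empty when nothing is ready
from time import sleep

q = JoinableQueue()
for item in ("a", "b", "c"):
    q.put(item)
sleep(0.1)  # items pass through a background feeder thread, so something
            # put an instant ago may not be visible to get_nowait yet

drained = []
while True:
    try:
        drained.append(q.get_nowait())  # returns at once or raises Empty
        q.task_done()                   # pair every successful get with task_done
    except Empty:
        break

q.join()        # returns immediately: every item has been marked done
print(drained)  # ['a', 'b', 'c']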

Example 1: MMapPool

# Required module: from multiprocessing import JoinableQueue [as alias]
# Method demonstrated: JoinableQueue.get_nowait
# Additional imports this snippet needs (not shown in the original):
import mmap
from multiprocessing import JoinableQueue
from Queue import Empty  # Python 2; on Python 3 use `from queue import Empty`
class MMapPool(object):
    """A fixed pool of anonymous mmap buffers shared across processes;
    freed buffer indices are returned through a JoinableQueue."""

    def __init__(self, n, mmap_size):
        self.n = n
        self.mmap_size = mmap_size
        self.pool = [mmap.mmap(-1, mmap_size) for _ in range(n)]
        self.free_mmaps = set(range(n))
        self.free_queue = JoinableQueue()

    def new(self):
        # If every buffer is currently leased, block until one comes back.
        if not self.free_mmaps:
            self.free_mmaps.add(self.free_queue.get())
            self.free_queue.task_done()
        # Reclaim any further returned buffers without blocking.
        while True:
            try:
                self.free_mmaps.add(self.free_queue.get_nowait())
                self.free_queue.task_done()
            except Empty:
                break
        mmap_idx = self.free_mmaps.pop()
        return mmap_idx, self.pool[mmap_idx]

    def join(self):
        # Block until all n buffers have been returned to the pool.
        while len(self.free_mmaps) < self.n:
            self.free_mmaps.add(self.free_queue.get())
            self.free_queue.task_done()

    def get(self, idx):
        return self.pool[idx]

    def free(self, idx):
        self.free_queue.put(idx)
Developer: FlavioFalcao · Project: imposm · Lines: 33 · Source: __init__.py
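A hypothetical usage sketch of the class above (pool size and buffer contents are invented): a producer leases buffers with new(), a consumer returns them with free(), and get_nowait lets new() reclaim returned slots without blocking.

pool = MMapPool(n=4, mmap_size=1024 * 1024)
idx, buf = pool.new()   # lease a 1 MiB buffer; blocks only if all 4 are leased
buf[:5] = b"hello"      # write into the anonymous shared mapping
pool.free(idx)          # hand the slot back, typically from another process
pool.join()             # block until every leased slot has been returned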

Example 2: main

# Required module: from multiprocessing import JoinableQueue [as alias]
# Method demonstrated: JoinableQueue.get_nowait
# Additional imports this Python 2 snippet needs (not shown in the original):
import datetime
from multiprocessing import JoinableQueue, Process, cpu_count
from Queue import Empty
def main(factor = 2):
    # e.g. on a machine with 2 cores, 2 * factor processes are spawned
    files_to_download = JoinableQueue()
    result_queue = JoinableQueue()
    time_taken = JoinableQueue()
    time_taken_to_read_from_queue = JoinableQueue()
    with open('downloads.txt', 'r') as f:
        for to_download in f:
            files_to_download.put_nowait(to_download.split('\n')[0])
    files_to_download_size = files_to_download.qsize()
    cores = cpu_count()
    no_of_processes = cores * factor
    for i in xrange(no_of_processes):
        files_to_download.put_nowait(None)  # one sentinel per worker signals "no more work"
    jobs = []
    start = datetime.datetime.now()
    for name in xrange(no_of_processes):
        p = Process(target = download, args = (files_to_download, result_queue,\
                                time_taken, time_taken_to_read_from_queue,name))
        p.start()
        jobs.append(p)

    for job in jobs:
        job.join()
    print result_queue.qsize()
    total_downloaded_urls = 0
    try:
        while 1:  # drain the per-worker download counts without blocking
            total_downloaded_urls += result_queue.get_nowait()
    except Empty:
        pass

    try:
        while 1:
            """
                The first get_nowait() initialises total_time: a timedelta
                cannot be added to a plain 0, so the 'in locals()' check
                distinguishes the first read from the following ones.
                Provided every worker put an entry, total_time is bound
                before Empty is raised.
            """
            if 'total_time' in locals():
                total_time += time_taken.get_nowait()
            else:
                total_time = time_taken.get_nowait()
    except Empty:
        print("{0} processes on {1} core machine took {2} to download {3} urls"
              .format(no_of_processes, cores, total_time, total_downloaded_urls))

    try:
        while 1:
            if 'queue_reading_time' in locals():
                queue_reading_time += time_taken_to_read_from_queue.get_nowait()
            else:
                queue_reading_time = time_taken_to_read_from_queue.get_nowait()
    except Empty:
        print("{0} processes on {1} core machine took {2} to read {3} urls from the queue"
              .format(no_of_processes, cores, queue_reading_time, files_to_download_size))
Developer: kracekumar · Project: gevent_multiprocessing · Lines: 64 · Source: multiprocessingrequests.py
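The download worker that main targets is not part of the snippet. A minimal Python 2 sketch of what it plausibly looks like, inferred from how the four queues are used above (urllib2 and all details below are assumptions, not the original code):

import datetime
import urllib2

def download(files_to_download, result_queue, time_taken,
             time_taken_to_read_from_queue, name):
    downloaded = 0
    download_time = datetime.timedelta()
    read_time = datetime.timedelta()
    while True:
        read_start = datetime.datetime.now()
        url = files_to_download.get()  # blocking; None is the poison pill
        read_time += datetime.datetime.now() - read_start
        if url is None:
            break
        start = datetime.datetime.now()
        try:
            urllib2.urlopen(url).read()
            downloaded += 1
        except Exception:
            pass  # count only successful downloads
        download_time += datetime.datetime.now() - start
    result_queue.put(downloaded)
    time_taken.put(download_time)
    time_taken_to_read_from_queue.put(read_time)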

Example 3: ParasolBatchSystem

# Required module: from multiprocessing import JoinableQueue [as alias]
# Method demonstrated: JoinableQueue.get_nowait
# Additional imports this Python 2 snippet needs (not shown in the original;
# the task_done() calls below imply the queues are JoinableQueues):
import re
import sys
import time
from multiprocessing import Process, JoinableQueue as Queue
from Queue import Empty
# AbstractBatchSystem, logger, popenParasolCommand and
# getParasolResultsFileName are jobTree-internal helpers.
class ParasolBatchSystem(AbstractBatchSystem):
    """The interface for Parasol.
    """
    def __init__(self, config, maxCpus, maxMemory):
        AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
        if maxMemory != sys.maxint:
            logger.critical("A max memory has been specified for the parasol batch system class of %i, but currently this batchsystem interface does not support such limiting" % maxMemory)
        #Keep the name of the results file for the pstat2 command.
        self.parasolCommand = config.attrib["parasol_command"]
        self.parasolResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
        #Reset the job queue and results (initially, we do this again once we've killed the jobs)
        self.queuePattern = re.compile(r"q\s+([0-9]+)")
        self.runningPattern = re.compile(r"r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+")
        self.killJobs(self.getIssuedJobIDs()) #Kill any jobs on the current stack
        logger.info("Going to sleep for a few seconds to kill any existing jobs")
        time.sleep(5) #Give the batch system a few seconds to sort itself out.
        logger.info("Removed any old jobs from the queue")
        #Reset the job queue and results
        exitValue = popenParasolCommand("%s -results=%s clear sick" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue is not None:
            logger.critical("Could not clear sick status of the parasol batch %s" % self.parasolResultsFile)
        exitValue = popenParasolCommand("%s -results=%s flushResults" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue is not None:
            logger.critical("Could not flush the parasol batch %s" % self.parasolResultsFile)
        open(self.parasolResultsFile, 'w').close()
        logger.info("Reset the results queue")
        #State needed to make the max-CPU limit work
        self.outputQueue1 = Queue()
        self.outputQueue2 = Queue()
        #worker = Thread(target=getUpdatedJob, args=(self.parasolResultsFileHandle, self.outputQueue1, self.outputQueue2))
        #worker.setDaemon(True)
        worker = Process(target=getUpdatedJob, args=(self.parasolResultsFile, self.outputQueue1, self.outputQueue2))
        worker.daemon = True
        worker.start()
        self.usedCpus = 0
        self.jobIDsToCpu = {}
         
    def issueJob(self, command, memory, cpu):
        """Issues parasol with job commands.
        """
        self.checkResourceRequest(memory, cpu)
        pattern = re.compile("your job ([0-9]+).*")
        parasolCommand = "%s -verbose -ram=%i -cpu=%i -results=%s add job '%s'" % (self.parasolCommand, memory, cpu, self.parasolResultsFile, command)
        #Deal with the cpus
        self.usedCpus += cpu
        while True: #Process finished results with no wait
            try:
                jobID = self.outputQueue1.get_nowait()
                self.usedCpus -= self.jobIDsToCpu.pop(jobID)
                assert self.usedCpus >= 0
                self.outputQueue1.task_done()
            except Empty:
                break
        while self.usedCpus > self.maxCpus: #If we are still waiting
            self.usedCpus -= self.jobIDsToCpu.pop(self.outputQueue1.get())
            assert self.usedCpus >= 0
            self.outputQueue1.task_done()
        #Now keep going
        while True:
            #time.sleep(0.1) #Sleep to let parasol catch up #Apparently unnecessary
            line = popenParasolCommand(parasolCommand)[1][0]
            match = pattern.match(line)
            if match is not None: #This is because parasol add job will return success, even if the job was not properly issued!
                break
            else:
                logger.info("We failed to properly add the job, we will try again after a sleep")
                time.sleep(5)
        jobID = int(match.group(1))
        self.jobIDsToCpu[jobID] = cpu
        logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
        logger.debug("Issued the job command: %s with (parasol) job id: %i " % (parasolCommand, jobID))
        return jobID
    
    def killJobs(self, jobIDs):
        """Kills the given jobs, represented as Job ids, then checks they are dead by checking
        they are not in the list of issued jobs.
        """
        while True:
            for jobID in jobIDs:
                exitValue = popenParasolCommand("%s remove job %i" % (self.parasolCommand, jobID), runUntilSuccessful=False)[0]
                logger.info("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
            runningJobs = self.getIssuedJobIDs()
            if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
                return
            time.sleep(5)
            logger.critical("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
    
    def getIssuedJobIDs(self):
        """Gets the list of jobs issued to parasol.
        """
        #Example issued job, first field is jobID, last is the results file
        #31816891 localhost  benedictpaten 2009/07/23 10:54:09 python ~/Desktop/out.txt
        issuedJobs = set()
        for line in popenParasolCommand("%s -extended list jobs" % self.parasolCommand)[1]:
            if line != '':
                tokens = line.split()
                if tokens[-1] == self.parasolResultsFile:
                    jobID = int(tokens[0])
                    issuedJobs.add(jobID)
        return list(issuedJobs)
#......... (remainder of this class omitted) .........
Developer: ArtRand · Project: jobTree · Lines: 103 · Source: parasol.py
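The heart of issueJob above is a two-phase throttle: first reclaim every already-finished job with get_nowait, never blocking, and only if still over the CPU limit fall back to a blocking get. A stripped-down sketch of that pattern, with invented names (Python 2, matching the snippet):

from Queue import Empty

def reserve_cpus(cpu, used_cpus, max_cpus, finished_queue, job_cpus):
    used_cpus += cpu
    while True:  # phase 1: reclaim finished jobs without waiting
        try:
            job_id = finished_queue.get_nowait()
            used_cpus -= job_cpus.pop(job_id)
            finished_queue.task_done()
        except Empty:
            break
    while used_cpus > max_cpus:  # phase 2: over the limit, so block
        used_cpus -= job_cpus.pop(finished_queue.get())
        finished_queue.task_done()
    return used_cpus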

Example 4: MultiProcCompressTool

# Required module: from multiprocessing import JoinableQueue [as alias]
# Method demonstrated: JoinableQueue.get_nowait
# Additional imports this snippet needs (not shown in the original):
from time import sleep
from multiprocessing import JoinableQueue, Process, cpu_count
from queue import Empty  # used below in place of the original bare except
# BaseCompressTool, Task and Result are dedupsqlfs-internal classes.
class MultiProcCompressTool(BaseCompressTool):

    _procs = None
    _np = 0
    _np_limit = 0
    _task_queues = None
    _result_queue = None


    def checkCpuLimit(self):
        if self.getOption("cpu_limit"):
            self._np_limit = int(self.getOption("cpu_limit"))
        self._np = cpu_count()
        if self._np_limit > 0:
            if self._np > self._np_limit:
                self._np = self._np_limit
        return self._np

    def init(self):

        self._procs = []
        self._task_queues = []

        self._np = self.checkCpuLimit()

        self._task_queue = JoinableQueue()
        self._result_queue = JoinableQueue()

        for n in range(self._np):
            tq = JoinableQueue()
            self._task_queues.append(tq)
            p = Process(target=self._worker, name="Compressor-%s" % n, args=(tq, self._result_queue,))
            p.start()
            self._procs.append(p)

        return self

    def stop(self):

        count = 50  # ~5 seconds of 0.1 s polls before force-terminating
        alive = True
        while alive:
            for n in range(self._np):
                tq = self._task_queues[n]
                tq.put_nowait("stop")

            sleep(0.1)

            alive = False
            for n in range(self._np):
                if self._procs[n].is_alive():
                    alive = True

            count -= 1
            if count <= 0:
                break

        for n in range(self._np):
            if self._procs[n].is_alive():
                self._procs[n].terminate()

        return self

    def _worker(self, in_queue, out_queue):
        """

        @param in_queue: {multiprocessing.JoinableQueue}
        @param out_queue: {multiprocessing.JoinableQueue}

        @var task: Task

        @return:
        """

        sleep_wait = 0.1

        while True:

            try:
                task = in_queue.get_nowait()
            except Empty:  # the original used a bare except; Empty is what get_nowait raises
                task = None

            if task is None:
                sleep(sleep_wait)
                continue

            if type(task) is float:
                sleep_wait = task
                in_queue.task_done()
                sleep(sleep_wait)
                continue

            if type(task) is str and task == "stop":
                in_queue.task_done()
                break

            if type(task) is Task:
                result = Result()
                result.cdata, result.method = self._compressData(task.data)
#......... (remainder of this class omitted) .........
Developer: carriercomm · Project: dedupsqlfs · Lines: 103 · Source: mp.py
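The worker above polls its queue with get_nowait and sleeps between empty polls rather than blocking, which keeps it responsive to both new tasks and the "stop" sentinel. A self-contained sketch of that poll-with-backoff idiom (Python 3; the doubling "work" merely stands in for the real _compressData call):

from multiprocessing import JoinableQueue, Process
from queue import Empty
from time import sleep

def poll_worker(in_queue, out_queue):
    while True:
        try:
            task = in_queue.get_nowait()  # non-blocking poll
        except Empty:
            sleep(0.1)                    # back off instead of spinning
            continue
        if task == "stop":                # sentinel terminates the worker
            in_queue.task_done()
            break
        out_queue.put(task * 2)           # stand-in for the real compression work
        in_queue.task_done()

if __name__ == "__main__":
    tasks, results = JoinableQueue(), JoinableQueue()
    p = Process(target=poll_worker, args=(tasks, results))
    p.start()
    for n in (1, 2, 3):
        tasks.put(n)
    tasks.put("stop")
    tasks.join()   # blocks until the worker has task_done()-ed every item
    p.join()
    while True:    # drain the results, again with get_nowait
        try:
            print(results.get_nowait())
        except Empty:
            break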


Note: The multiprocessing.JoinableQueue.get_nowait examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's license before redistributing or using the code, and do not reproduce this article without permission.