

Python JoinableQueue.close Method Code Examples

This article collects typical usage examples of the multiprocessing.JoinableQueue.close method in Python. If you are wondering what JoinableQueue.close does, or how and when to call it, the curated examples below may help. You can also explore further usage examples of the multiprocessing.JoinableQueue class it belongs to.


The following shows 15 code examples of JoinableQueue.close, sorted by popularity by default.
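Before the examples, here is a minimal, self-contained sketch of the pattern most of them follow: a producer puts work onto a JoinableQueue, a None sentinel tells the worker to stop, and close() is called once no more data will be put on the queue. The worker function and the integer items are illustrative assumptions, not taken from any example on this page.

from multiprocessing import JoinableQueue, Process

def worker(q):
    # Pull items until the None sentinel arrives; every get() is paired
    # with task_done() so that q.join() can unblock.
    while True:
        item = q.get()
        if item is None:
            q.task_done()
            break
        print('processed', item)
        q.task_done()

if __name__ == '__main__':
    q = JoinableQueue()
    p = Process(target=worker, args=(q,))
    p.start()
    for i in range(5):
        q.put(i)
    q.put(None)      # sentinel: tell the worker to exit
    q.join()         # block until task_done() has been called for every item
    q.close()        # declare that this process will put no more data
    q.join_thread()  # wait for the feeder thread to flush its buffer
    p.join()

Note that close() does not terminate consumers or drain the queue; it only marks the sending side as finished, which is why the examples below pair it with a sentinel value, join(), or both.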

Example 1: readCEFFile

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def readCEFFile(afile,pygtail):
    if exists(afile): #sometimes files can move/archive while we iterate the list
        try:
            #start a process to post our stuff.
            logcache=JoinableQueue()
            postingProcess=Process(target=postLogs,args=(logcache,),name="cef2mozdefHTTPPost")
            postingProcess.start()            
            #have pygtail feed us lines 
            for line in pygtail:
                pygtail._update_offset_file()
                cefDict=parseCEF(line)
                #logger.debug(json.dumps(cefDict))
                #append json to the list for posting
                if cefDict is not None:
                    logcache.put(json.dumps(cefDict))        
            logger.info('{0} done'.format(afile))
            logger.info('waiting for posting to finish')
            logcache.put(None)
            logcache.close()
            #logger.info('posting done')
        except KeyboardInterrupt:
            sys.exit(1)
        except ValueError as e:
            logger.fatal('Exception while handling CEF message: %r'%e)
            sys.exit(1)    
Author: DjDarthyGamer, Project: MozDef, Lines: 27, Source: cefFile2Mozdef.py

Example 2: crunch

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def crunch(file_name, ext_type, handler, pool_size=4, queue_size=40,
           limit=None):

    print 'Crunching file: %s, limit: %s' % (file_name, limit)

    q = JoinableQueue(queue_size)
    q_feats = Queue()

    pool = Pool(pool_size, wrap_handler(handler), ((q, q_feats),))

    with file_reader(file_name) as reader:
        idx = 0
        for entry in reader:

            if (entry.pathname.find(ext_type) != -1):
                text = [b for b in entry.get_blocks()]
                key = entry.pathname.split('/')[-1].split('.')[0]

                q.put((key, text), True)
                idx += 1

                print 'Processing:', entry.pathname, idx

                if limit and idx >= limit:
                    print 'Reached the limit'
                    break

        q.close()
        q.join()
        pool.close()

    result = []
    for i in range(q_feats.qsize()):
        result.append(q_feats.get())
    return result
Author: Anhmike, Project: kaggle-malware-classification, Lines: 37, Source: sevenz_cruncher.py

Example 3: queueManager

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def queueManager(numProc, myList, function, *args):
	'''queueManager(numProc, myList, function, *args):
	generic function used to start worker processes via the multiprocessing Queue object
	numProc - number of processors to use
	myList - a list of objects to be iterated over
	function - target function
	*args - additional arguments to pass to function

	Return - an unordered list of the results from myList
	'''
	qIn = Queue()
	qOut = JoinableQueue()
	if args:
		arguments = (qIn, qOut,) + args
	else:
		arguments = (qIn, qOut,)
	results = []
	
	# reduce processor count if proc count > files
	
	i = 0
	for l in myList:
		qIn.put((i,l))
		i += 1

	for _ in range(numProc):
		p = Process(target=function, args=arguments)
		p.start()
	sys.stdout.write("Progress: {:>3}%".format(0))
	curProgress = 0
	lastProgress = 0
	while qOut.qsize() < len(myList):
		#sys.stdout.write("\b\b\b\b{:>3}%".format(int(ceil(100*qOut.qsize()/len(myList)))))
		curProgress = int(ceil(100*qOut.qsize()/len(myList)))
		if curProgress - lastProgress > 10:
			lastProgress += 10
			sys.stdout.write("\nProgress: {:>3}%".format(lastProgress))
			sys.stdout.flush()
	sys.stdout.write("\nProgress: {:>3}%".format(100))
	#sys.stdout.write("\b\b\b\b{:>3}%".format(100))
	sys.stdout.write("\n")
	for _ in range(len(myList)):
		# indicate done results processing
		results.append(qOut.get())
		qOut.task_done()
	#tell child processes to stop
	for _ in range(numProc):
		qIn.put('STOP')

	orderedRes = [None]*len(results)
	for i, res in results:
		orderedRes[i] = res

	qOut.join()

	qIn.close()
	qOut.close()
	return orderedRes
Author: lwwalker, Project: kloman3D, Lines: 60, Source: kern3Dv05.py

Example 4: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def main():
    print("______POPULATE FEATURE NAMES START__")
    populateFeatureNames(trainFile)
    print("___POPULATE FEATURE NAMES ENDS__")

    ################
    print("building Queue start")
    q = JoinableQueue(20)
    q_feats = Queue()
    print("building Queue end")
    print("building pool start")
    pool = Pool(16, populateFeatures, ((q, q_feats),))
    print("buiding pool ends")
    returnedList = []
    print("onlyfiles start")
    onlyfiles = [f for f in os.listdir(path) if ".asm" in f]
    print("onlyfiles")
    print(onlyfiles)
    print("onlyfiles ends")
    print("___FEATURE EXTRACTION STARTS FOR PATH__")
    for ffile in onlyfiles:
        q.put((ffile, path))
    start = time.asctime(time.localtime(time.time()))
    print("Start Time : " + start)
    q.close()
    print("Q closed")
    start = time.asctime(time.localtime(time.time()))
    print("Start Time : " + start)
    # time.sleep(100)
    q.join()
    print("Q joined")
    start = time.asctime(time.localtime(time.time()))
    print("Start Time : " + start)
    # time.sleep(100)
    pool.close()
    print("Pool closed")
    start = time.asctime(time.localtime(time.time()))
    print("Start Time : " + start)

    for i in range(q_feats.qsize()):
        returnedList.append(q_feats.get())
        # returnedList=p.map(functools.partial(populateFeatures, filePath=path), onlyfiles)
        # time.sleep(10)
        # p.close()
        # time.sleep(100)
        # p.join()
        # time.sleep(10)
    print("___ PROCESSING OUTPUT OF MAP FUNCTION FOR FEATURE_EXTRACTION STARTS___")
    # except:
    # 	print("Something went wrong")
    generateHeader()
    generateFeatures(returnedList)

    print("_____ PROCESSING OUTPUT OF MAP FUNCTION FOR FEATURE_EXTRACTION ENDS____")
    print("_____FEATURE EXTRACTION ENDS____")
Author: killbug2004, Project: Data-Mining-Project---Microsoft_Malware_Challenge, Lines: 57, Source: test.py

Example 5: Mothership

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
class Mothership(object):

    """ Monitor of producer and consumers """

    def __init__(self, producer, consumers, graceful=False):
        self._queue = JoinableQueue()

        self._producer_proxy = ProducerProxy(self._queue, producer)
        self._consumer_pool = list(ConsumerProxy(self._queue, cons) for cons in consumers)
        self._graceful = graceful

    def start(self):
        """ Start working """
        try:
            logger.info('Starting Producers'.center(20, '='))
            self._producer_proxy.start()

            time.sleep(0.1)

            logger.info('Starting Consumers'.center(20, '='))
            for consumer in self._consumer_pool:
                consumer.start()

            self._producer_proxy.join()
            self._queue.join()
            for consumer in self._consumer_pool:
                consumer.join()

            self._queue.close()

        except KeyboardInterrupt:

            self._producer_proxy.stop()
            self._producer_proxy.join()

            if self._graceful:
                logger.info('Shutting Down gracefully...')
                self._queue.join()

            for consumer in self._consumer_pool:
                consumer.stop()
                consumer.join()

            self._queue.close()

    def __enter__(self):
        return self

    def __exit__(self, types, value, tb):
        return
Author: GlieseRay, Project: qworker, Lines: 52, Source: prodcons.py

Example 6: __iter__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
	def __iter__(self):
		queue = JoinableQueue(maxsize=self.max_queue_size)

		n_batches, job_queue = self._start_producers(queue)

		# Run as consumer (read items from queue, in current thread)
		for x in xrange(n_batches):
			item = queue.get()
			#print queue.qsize(), "GET"
			yield item # Yield the item to the consumer (user)
			queue.task_done()

		queue.close()
		job_queue.close()
Author: gzuidhof, Project: luna16, Lines: 16, Source: parallel.py

Example 7: KnowledgeBase

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
class KnowledgeBase(Daemon):

    def __init__(self, config):
        set_logging(config)
        self.config = config
        self.pidfile = os.path.abspath(config['pidfile'])
        self.time_lock = Lock()
        self.teller_queue = JoinableQueue()
        self.session_factory = get_sasession(self.config)
        session = self.session_factory()

    def run(self):
        if int(self.config['instant_duration']):
            self.clock = Ticker(self.config, self.session_factory(),
                                self.time_lock, self.teller_queue)
            self.clock.start()

        host = self.config['kb_host']
        port = int(self.config['kb_port'])
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            teller = Teller(self.config, self.session_factory, self.teller_queue)
            teller.daemon = True
            teller.start()
        self.socket = Listener((host, port))
        while True:
            try:
                client = self.socket.accept()
            except InterruptedError:
                return
            self.time_lock.acquire()
            self.teller_queue.put(client)
            self.time_lock.release()

    def cleanup(self, signum, frame):
        """cleanup tasks"""
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            self.teller_queue.put(None)
        self.teller_queue.close()
        try:
            self.clock.ticking = False
        except AttributeError:
            pass
        self.teller_queue.join()
        try:
            self.clock.join()
        except AttributeError:
            pass
        logger.warn('bye from {n}, received signal {p}'.format(n=mp.current_process().name, p=str(signum)))
Author: enriquepablo, Project: terms, Lines: 52, Source: kb.py

Example 8: __iter__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
	def __iter__(self):
		queue = JoinableQueue(maxsize=params.N_PRODUCERS*2)

		n_batches, job_queue = self.start_producers(queue)

		# Run as consumer (read items from queue, in current thread)
		for x in xrange(n_batches):
			item = queue.get()
			#print len(item[0]), queue.qsize(), "GET"
			yield item
			queue.task_done()

		#queue.join() #Lock until queue is fully done
		queue.close()
		job_queue.close()
Author: StevenReitsma, Project: kaggle-diabetic-retinopathy, Lines: 17, Source: iterators.py

Example 9: Multiplexer

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
class Multiplexer(object):
    def __init__(self, worker, writer, threads=4):
        self.worker=worker
        self.writer=writer
        self.q=JoinableQueue()
        self.done = Value(c_bool,False)
        self.consumer=Process(target=self.consume)
        self.pool = Pool(threads, init_opener)

    def start(self):
        self.done.value=False
        self.consumer.start()

    def addjob(self, url, data=None):
        params=[url]
        if data: params.append(data)
        try:
           return self.pool.apply_async(self.worker,params,callback=self.q.put)
        except:
            logger.error('[!] failed to scrape '+ url)
            logger.error(traceback.format_exc())
            raise

    def finish(self):
        self.pool.close()
        logger.info('closed pool')
        self.pool.join()
        logger.info('joined pool')
        self.done.value=True
        self.q.close()
        logger.info('closed q')
        self.consumer.join()
        logger.info('joined consumer')
        #self.q.join()
        #logger.info('joined q')

    def consume(self):
        param=[0,0]
        while True:
            job=None
            try:
                job=self.q.get(True, timeout=1)
            except Empty:
                if self.done.value==True: break
            if job:
                param = self.writer(job, param)
                self.q.task_done()
        logger.info('added/updated: %s' % param)
Author: ehj, Project: parltrack, Lines: 50, Source: utils.py

Example 10: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def main():
    workers=cpu_count()
    line_queue=JoinableQueue(workers*2) # Keep at most 2*workers lines in flight
    input_file=open(sys.argv[1], 'rU')
    output_file=open(sys.argv[2], 'w')
    output_queue=Queue(workers*3)

    processes=[]
    for i in xrange(workers):
        this_process=Process(target=process_queue,
                             args=(line_queue, output_queue, LINES_AT_ONCE))
        this_process.start()
        processes.append(this_process)

    # Start the output processor
    output_processor=Process(target=retrieve_output, 
                             args=(output_queue, output_file, LINES_AT_ONCE))
    output_processor.start()

    small_queue=[]
    block_number=0
    for l in input_file:
        small_queue.append(l)
        if len(small_queue)>=LINES_AT_ONCE:
            line_queue.put((block_number, small_queue))
            block_number+=1
            small_queue=[]
    if len(small_queue)>0:
        line_queue.put((block_number, small_queue))
        
    for i in xrange(workers):
        line_queue.put('STOP')
    
    print "Waiting for all tasks to end."
    line_queue.close()
    line_queue.join()
    for p in processes:
        p.join()
    
    print "All tasks ended. Dumping the final output."
    output_queue.put(None)
    output_queue.close()
    output_processor.join()
    
    print "Done. Exiting."
    output_file.close()
    
    return
Author: jherskovic, Project: metamap_tools, Lines: 50, Source: process_via_metamap.py

Example 11: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def main():
    processes = cpu_count() * 2
    queue = JoinableQueue()
    get_links(queue)
    create_folder()

    for i in range(processes):
        # Note: Process.start() returns None, so keep the Process object if it needs to be joined later
        p = Process(target=save_image, args=(queue,))
        p.start()

    for i in range(processes):
        queue.put(None) ## Tell the processes to end

    queue.join()
    queue.close()
Author: MerreM, Project: SNscraper, Lines: 18, Source: scraper.py

Example 12: qwork

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def qwork(command_file, nproc):
    """Queue up commands to run in parallel."""

    print("Queuing work using %d processes...\n" % nproc)
    queue = JoinableQueue()

    for command in command_file:
        queue.put(command.decode('utf8').rstrip('\n'))

    for ii in range(nproc):
        Runner(queue)

    queue.join()
    queue.close()

    print("\n...done!")
Author: smutch, Project: qwork, Lines: 18, Source: qwork.py

Example 13: clear_area_around_eye

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def clear_area_around_eye(size = 256, image_dir = 'I:/AI_for_an_eyes/test/test/', target_dir = 'I:/AI_for_an_eyes/test/test_zonder_meuk_256/'):
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    util.update_progress(0)


    tasks = glob.glob(image_dir+'*.jpeg')
    job_total = len(tasks)

    print 'Processing images matching ' + image_dir+ '*.jpeg'

    jobs = Queue()
    result = JoinableQueue()
    NUMBER_OF_PROCESSES = cpu_count()*2

    for im_name in tasks:
        jobs.put(im_name)

    for i in xrange(NUMBER_OF_PROCESSES):
        p = Thread(target=worker, args=(i, jobs, result, target_dir, size))
        p.daemon = True
        p.start()

    print 'Starting workers (', NUMBER_OF_PROCESSES, ')!'

    n_complete = 0
    for t in xrange(len(tasks)):
        r = result.get()
        n_complete += 1
        util.update_progress(n_complete / float(job_total))  # float() avoids Python 2 integer division, which would report 0 until done
        result.task_done()
        #print t, 'done'

    for w in xrange(NUMBER_OF_PROCESSES):
        jobs.put(None)

    util.update_progress(1)

    print 'Done!'
    time.sleep(1)
    result.join()
    jobs.close()
    result.close()
Author: StevenReitsma, Project: kaggle-diabetic-retinopathy, Lines: 46, Source: clear_area_around_eye_parallel.py

Example 14: process_task

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def process_task(num_workers):
    logging.info("Started")

    task_queue = JoinableQueue()
    done_queue = Queue()

    def worker(name):
        """
        represents an 'expensive' task
        """
        logging.info("Started process : %s" % name)
        for task in iter(task_queue.get, 'Stop'):
            done_queue.put(task)
            time.sleep(1)
            task_queue.task_done()
        # This is for the poison pill task
        task_queue.task_done()
        logging.info("Done process : %s" % name)

    # First we start the workers, and give them a list that we can look at after
    for i in range(num_workers):
        Process(target=worker, args=("P-%s" % (i+1), )).start()

    # Now the main thread populates the Queue
    num_tasks = num_workers * 5
    for i in range(num_tasks):
        task_queue.put(i)

    # Now, administer the poison pill which tells processes that we are done populating the Q
    for i in range(num_workers):
        task_queue.put('Stop')

    # Now wait for workers to finish their work
    task_queue.close()
    task_queue.join()

    logging.info("Workers are done")

    # Now verify that all tasks are done by seeing them in the done queue
    done_queue.put('Stop')
    done_tasks = [task for task in iter(done_queue.get, 'Stop')]
    assert len(done_tasks) == num_tasks
    logging.info("Verified work - done!")
Author: ofreshy, Project: parallel, Lines: 45, Source: q_with_workers.py

Example 15: parallel_for

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import close [as alias]
def parallel_for(a, cls, args=[], kwargs={}, num_processes=None):
    from multiprocessing import Process, JoinableQueue, cpu_count, Pipe
    if num_processes is None:
        num_processes = cpu_count()
    # Note that JoinableQueue uses an integer for tracking locations in the queue.
    # Because it's using shared memory it's not terribly flexible and gives annoyingly
    # unclear errors if you go over the limit. We'd like the queue to be as large as
    # possible so that we can avoid contention, but without allocating a max possible
    # size queue unless we need it, thus the calculation below. 32767 is a hard limit.
    q = JoinableQueue(maxsize=min(len(a)+num_processes, 2**15 - 1))

    output_pipes = [Pipe(duplex=False) for _ in range(num_processes)]
    send_pipes = [p for _, p in output_pipes]
    recv_pipes = [p for p, _ in output_pipes]
    pool = [Process(target=_parallel_for, args=(q, cls, pipe) + tuple(args), kwargs=kwargs)
            for pipe in send_pipes]
    output_watcher = MultiPipeWatcher(recv_pipes)
    try:
        for p in pool:
            p.start()
        output_watcher.start()
        for x in a:
            q.put(x)
        for _ in range(num_processes):
            q.put(None) # End markers
        q.close()
        q.join_thread()
        q.join()
        for p in pool:
            p.join()
        output_watcher.flush()
        output_watcher.join()
        combined_output = output_watcher.merged
        return combined_output
    except KeyboardInterrupt:
        print "Interrupted -- terminating worker processes"
        for p in pool:
            p.terminate()
        for p in pool:
            p.join()
        raise
Author: bodylabs, Project: baiji, Lines: 43, Source: parallel.py


Note: The multiprocessing.JoinableQueue.close examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, refer to each project's license. Do not reproduce without permission.