

Python JoinableQueue.task_done Method Code Examples

This article collects typical code examples of the multiprocessing.JoinableQueue.task_done method in Python. If you are wondering how JoinableQueue.task_done works, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, multiprocessing.JoinableQueue.


Fifteen code examples of JoinableQueue.task_done are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
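Before the examples, here is a minimal, self-contained sketch of the pattern all fifteen share: a consumer calls task_done() exactly once for each item it takes with get(), and the producer's join() returns only after every queued item has been marked done. This sketch is illustrative rather than taken from any project below; the consumer function and the None sentinel are our own naming.

from multiprocessing import JoinableQueue, Process

def consumer(queue):
    while True:
        item = queue.get()
        try:
            if item is None:  # sentinel: no more work
                break
            print('processed', item)
        finally:
            # Pair every get() with exactly one task_done(), even for the
            # sentinel; otherwise queue.join() below blocks forever.
            queue.task_done()

if __name__ == '__main__':
    queue = JoinableQueue()
    worker = Process(target=consumer, args=(queue,), daemon=True)
    worker.start()
    for i in range(5):
        queue.put(i)
    queue.put(None)  # tell the worker to stop
    queue.join()     # returns once task_done() has been called for all six items
    worker.join()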

Example 1: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    numToProcess = -1
    scores = pd.DataFrame(columns=['query','fmeasure','precision','recall',
                                   'size','maxDistance','topHits',"contextSteps"])

    print len(datasets)

    for key in datasets:
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}
    distances = {}

    jobs.join()

    dataset_index = collections.defaultdict(set)
    annotated_datasets = set()
    while not result.empty():
        dataset, classes = result.get()
        if len(classes) == 0:
            annotated_datasets.add(dataset)
        for c in classes.keys():
            dataset_index[c].add(dataset)
            owl_class = Class(c, graph=graph)
            for parent in owl_class.parents:
                dataset_index[parent.identifier].add(dataset)
        result.task_done()

    print '\n'
    
    for query, c in queries.items():
        manual = ground_truth[query]
        automated = dataset_index[c]
        hits = manual & automated
        misses = manual - automated
        precision = np.nan if len(automated) == 0 else float(len(hits)) / len(automated)
        recall = np.nan if len(manual) == 0 else float(len(hits)) / len(manual)
        if precision != 0 or recall != 0:
            fmeasure = 0 if np.isnan(precision) or np.isnan(recall) else 2 * (precision * recall) / (precision + recall)
        else:
            fmeasure = 0
        scores = scores.append(dict(query=query, size=len(manual), precision=precision, recall=recall, fmeasure=fmeasure,topHits=topHits, maxDistance=maxDistance, contextSteps = context_steps),
                        ignore_index=True)
        print "Hits for", query, c
        print '\n'.join(sorted(hits))
    print scores
    print "Annotated", len(annotated_datasets), "datasets."
Author: tetherless-world; Project: linkipedia; Lines: 62; Source: dataone_ontology_matching_by_query.py

Example 2: InternalSet

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def InternalSet(Achild:Queue, Bchild:Queue, outqueue:Queue):
    """Take the output of two LeafSet's and take the union."""
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)
    AminusB = set()
    BminusA = set()
    morestuff = True
    while morestuff:
        a = Achild.get()
        b = Bchild.get()
        logger.info("Internal:%s:%s" % (a, b))
        if a in BminusA:
            BminusA.remove(a)
        elif a not in AminusB:
            AminusB.add(a)
            outqueue.put(a)
        if b in AminusB:
            AminusB.remove(b)
        elif b not in BminusA:
            BminusA.add(b)
            outqueue.put(b)
        Achild.task_done()
        Bchild.task_done()
        if (a == SIGOBJ) or (b == SIGOBJ):
            outqueue.put(SIGOBJ)
            morestuff = False
    logger.info("internal done")
Author: jpfairbanks; Project: streaming; Lines: 29; Source: HierSetFilter.py

Example 3: __init__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
class AlarmExecutor:
    def __init__(self):
        self.queue = JoinableQueue(10)
        self.running = False
        self.t = Thread(target=self._run, name="AlarmExecutor")

    def _run(self):
        while self.running:
            try:
                alarm = self.queue.get(block=True, timeout=1)
                alarm.execute() 
                logging.debug("Alarm executed")
                self.queue.task_done()       
            except Queue.Empty:
                continue
            
    def start(self):
        logging.debug("Starting alarm executor")
        self.running = True
        self.t.start()

    def stop(self):
        if self.running:
            logging.debug("Stoppping alarm executor")
            self.running = False
            self.t.join()
        else:
            logging.debug("Attempted to stop alarm executor when it is not running")
Author: RaveGun; Project: redeem; Lines: 30; Source: Alarm.py

Example 4: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def main():
    from multiprocessing import JoinableQueue
    from genmod.vcf import vcf_header
    from genmod.utils import annotation_parser
    parser = argparse.ArgumentParser(description="Parse different kind of pedigree files.")
    parser.add_argument('variant_file', type=str, nargs=1 , help='A file with variant information.')
    parser.add_argument('annotation_file', type=str, nargs=1 , help='A file with feature annotations.')
    parser.add_argument('-phased', '--phased', action="store_true", help='If variant file is phased.')    
    parser.add_argument('-v', '--verbose', action="store_true", help='Increase output verbosity.')
    
    args = parser.parse_args()
    infile = args.variant_file[0]
    if args.verbose:
        print('Parsing annotationfile...')
        start_time_annotation = datetime.now()
    my_anno_parser = annotation_parser.AnnotationParser(args.annotation_file[0], 'ref_gene')
    
    if args.verbose:
        print('annotation parsed. Time to parse annotation: %s\n' % str(datetime.now() - start_time_annotation))
    
    my_head_parser = vcf_header.VCFParser(infile)
    my_head_parser.parse()
    print(my_head_parser.__dict__)
    variant_queue = JoinableQueue()
    start_time = datetime.now()        
    
    my_parser = VariantFileParser(infile, variant_queue, my_head_parser, my_anno_parser, args)
    nr_of_batches = my_parser.parse()
    print(nr_of_batches)
    for i in range(nr_of_batches):
        variant_queue.get()
        variant_queue.task_done()
    
    variant_queue.join()
    print('Time to parse variants: %s ' % str(datetime.now()-start_time))
Author: tzhughes; Project: genmod; Lines: 37; Source: vcf_parser.py

Example 5: __init__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register( self.queue.join )

        process = Process(target=self.work)
        process.daemon = True
        process.start()


    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for: 
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()


    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put( (func, args, wait_for) )

        return self.event 
Author: wbkifun; Project: fdtd_accelerate; Lines: 30; Source: queue_multiprocessing_test.py

Example 6: queueManager

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def queueManager(numProc, myList, function, *args):
	'''queueManager(numProc, myList, function, *args):
	generic function used to start worker processes via the multiprocessing Queue object
	numProc - number of processors to use
	myList - a list of objects to be iterated over
	function - target function
	*args - additional arguments to pass to function

	Return - an unordered list of the results from myList
	'''
	qIn = Queue()
	qOut = JoinableQueue()
	if args:
		arguments = (qIn, qOut,) + args
	else:
		arguments = (qIn, qOut,)
	results = []
	
	# reduce processor count if proc count > files
	
	i = 0
	for l in myList:
		qIn.put((i,l))
		i += 1

	for _ in range(numProc):
		Process(target = function, args = arguments).start()
	sys.stdout.write("Progress: {:>3}%".format(0))
	curProgress = 0
	lastProgress = 0
	while qOut.qsize() < len(myList):
		#sys.stdout.write("\b\b\b\b{:>3}%".format(int(ceil(100*qOut.qsize()/len(myList)))))
		curProgress = int(ceil(100*qOut.qsize()/len(myList)))
		if curProgress - lastProgress > 10:
			lastProgress += 10
			sys.stdout.write("\nProgress: {:>3}%".format(lastProgress))
			sys.stdout.flush()
	sys.stdout.write("\nProgress: {:>3}%".format(100))
	#sys.stdout.write("\b\b\b\b{:>3}%".format(100))
	sys.stdout.write("\n")
	for _ in range(len(myList)):
		# indicate done results processing
		results.append(qOut.get())
		qOut.task_done()
	#tell child processes to stop
	for _ in range(numProc):
		qIn.put('STOP')

	orderedRes = [None]*len(results)
	for i, res in results:
		orderedRes[i] = res

	qOut.join()

	qIn.close()
	qOut.close()
	return orderedRes
Author: lwwalker; Project: kloman3D; Lines: 60; Source: kern3Dv05.py

Example 7: WorkerQueue

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
class WorkerQueue(object):

    def __init__(self, num_workers = 20):
        self.queue = Queue()
        self.pool = []
        self._setup_workers(num_workers)

    def _setup_workers(self, num_workers):
        """ Sets up the worker threads
              NOTE: undefined behaviour if you call this again.
        """
        self.pool = []

        for _ in range(num_workers):
            self.pool.append(Thread(target=self.threadloop))

        for a_thread in self.pool:
            a_thread.setDaemon(True)
            a_thread.start()


    def do(self, f, *args, **kwArgs):
        """ puts a function on a queue for running later.
        """
        self.queue.put((f, args, kwArgs))


    def stop(self):
        """ Stops the WorkerQueue, waits for all of the threads to finish up.
        """
        self.queue.put(STOP)
        for thread in self.pool:
            thread.join()


    def threadloop(self): #, finish = False):
        """ Loops until all of the tasks are finished.
        """
        while True:
            args = self.queue.get()
            if args is STOP:
                self.queue.put(STOP)
                self.queue.task_done()
                break
            else:
                try:
                    args[0](*args[1], **args[2])
                finally:
                    # clean up the queue, raise the exception.
                    self.queue.task_done()
                    #raise


    def wait(self):
        """ waits until all tasks are complete.
        """
        self.queue.join()
Author: 123jefferson; Project: MiniBloq-Sparki; Lines: 59; Source: __init__.py

Example 8: worker_func

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def worker_func(in_queue: JoinableQueue, out_queue: Queue, hr_t, tr_h):
    while True:
        dat = in_queue.get()
        if dat is None:
            in_queue.task_done()
            continue
        testing_data, head_pred, tail_pred = dat
        out_queue.put(test_evaluation(testing_data, head_pred, tail_pred, hr_t, tr_h))
        in_queue.task_done()
Author: bxshi; Project: ProjE; Lines: 11; Source: ProjE_sigmoid.py

Example 9: apply_mt

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
    def apply_mt(self, xs, parallelism, **kwargs):
        """Run the UDF multi-threaded using python multiprocessing"""
        if snorkel_conn_string.startswith('sqlite'):
            raise ValueError('Multiprocessing with SQLite is not supported. Please use a different database backend,'
                             ' such as PostgreSQL.')

        # Fill a JoinableQueue with input objects
        in_queue = JoinableQueue()
        for x in xs:
            in_queue.put(x)

        # If the UDF has a reduce step, we collect the output of apply in a
        # Queue. This is also used to track progress via the UDF sentinel
        out_queue = JoinableQueue()

        # Keep track of progress counts
        total_count = in_queue.qsize()
        count = 0

        # Start UDF Processes
        for i in range(parallelism):
            udf = self.udf_class(in_queue=in_queue, out_queue=out_queue,
                add_to_session=(self.reducer is None), **self.udf_init_kwargs)
            udf.apply_kwargs = kwargs
            self.udfs.append(udf)

        # Start the UDF processes, and then join on their completion
        for udf in self.udfs:
            udf.start()

        while any([udf.is_alive() for udf in self.udfs]) and count < total_count:
            y = out_queue.get()

            # Update progress whenever an item was processed
            if y == UDF.TASK_DONE_SENTINEL:
                count += 1
                if self.pb is not None:
                    self.pb.update(1)

            # If there is a reduce step, do it now on this thread
            elif self.reducer is not None: 
                self.reducer.reduce(y, **kwargs)
                out_queue.task_done()

            else:
                raise ValueError("Got non-sentinel output without reducer.")

        if self.reducer is None:
            for udf in self.udfs:
                udf.join()
        else:
            self.reducer.session.commit()
            self.reducer.session.close()

        # Flush the processes
        self.udfs = []
Author: HazyResearch; Project: snorkel; Lines: 58; Source: udf.py

Example 10: Analyzer

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
class Analyzer(object):
	def __init__(self, data_root, working_dir, tpr, index=True, index_output='index.h5'):
		# list of analysis objects
		self.__analyses = []
		self.__working_dir = working_dir
		self.__fs = file_system.SH3FileSystem(data_root, index=True, index_output=index_output)
		self.__loader = loader.Loader(working_dir)
		self.__task_queue = JoinableQueue(8)
		self.__tpr = tpr

	def run(self):
		# start a queue of size max 8, block if no empty slots
		# populate the task queue with (analysis, xtc) items 
		for i in range(0, 8):
			p = Process(target=self.__worker)
			p.start()

		for batch in self.__fs.xtc_files():
			print "batch", batch
			for xtc in batch:
				for analysis in self.__analyses:
					print "queuing", analysis.name(), "and", xtc.name()
					self.__task_queue.put([analysis, xtc], True, None)

			print "waiting for these tasks to finish"
			self.__task_queue.join()
			print "tasks have finished"

			print "PID", os.getpid(), "loading analysis"
			for xtc in batch:
				for a in self.__analyses:
					self.__loader.load(a, xtc)	

	def add(self, analysis):
		self.__analyses.append(analysis)
	
	def remove(self, analysis):
		self.__analyses.append(analysis)

	def __worker(self):
		# TODO: use pool because it looks like the processes sometimes don't die if it fails
		# get one item from queue
		# block if queue is empty
		while True:
			try:
				# timeout after 30 seconds
				analysis,xtc = self.__task_queue.get(True, 30)
			except Empty:
				break
			else:
				analysis.run(xtc, self.__tpr)
				self.__task_queue.task_done()
Author: graceli; Project: labwork; Lines: 54; Source: analysis.py

Example 11: worker

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def worker(input_q: JoinableQueue, output: Queue):
    from django import db
    db.connection.close()
    while True:
        task = input_q.get()
        if task == "end":
            break
        html = urlOpen.get_html(task.url)
        if html:
            text = textParser.get_text_from_html(html)
        input_q.task_done()
        # info()
        output.put(task.url)
    print("exit")
Author: aregina; Project: news_project; Lines: 16; Source: multiprocessingTest.py

Example 12: __iter__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
	def __iter__(self):
		queue = JoinableQueue(maxsize=self.max_queue_size)

		n_batches, job_queue = self._start_producers(queue)

		# Run as consumer (read items from queue, in current thread)
		for x in xrange(n_batches):
			item = queue.get()
			#print queue.qsize(), "GET"
			yield item # Yield the item to the consumer (user)
			queue.task_done()

		queue.close()
		job_queue.close()
Author: gzuidhof; Project: luna16; Lines: 16; Source: parallel.py

Example 13: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    print len(datasets)
    numToProcess = 10
    scores = pd.DataFrame(columns=['precision','recall','fmeasure',
                                   'numResult','minScore','topHits',
                                   'contentWeight','relationWeight'])
    manual_annotations = get_manual_annotations(numToProcess)
    manual_tuples = get_ir_tuples(manual_annotations)

    for key in manual_annotations.keys():
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}

    jobs.join()

    while not result.empty():
        dataset, classes = result.get()
        automated_annotations[dataset] = classes
        result.task_done()

    automated_tuples = get_ir_tuples(automated_annotations)
    hits = manual_tuples & automated_tuples
    misses = manual_tuples - automated_tuples
    
    precision = float(len(hits)) / len(automated_tuples)
    recall = float(len(hits)) / len(manual_tuples)
    fmeasure = 2 * (precision * recall) / (precision + recall)
    # print '\t'.join([str(x) for x in [precision, recall, fmeasure,
    #                              numResult, minScore, topHits]])
    scores = scores.append(dict(precision=precision, recall=recall, fmeasure=fmeasure,
                                numResult=numResult, minScore=minScore, topHits=topHits,
                                contentWeight=contentWeight, relationWeight=relationWeight),
                        ignore_index=True)

    print scores
Author: tetherless-world; Project: linkipedia; Lines: 51; Source: dataone_entity_linking_eml.py

Example 14: __init__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
class ImageCrawler:
    
    NUM_PER_FETCH = 100
    NUM_PROCESSES = 10
    def __init__(self, database_config_path):
        self.queue = JoinableQueue()
        self.logger = Logger("image_crawler")
        self.adapter = ImageStoreAdapter(database_config_path, self.logger)
        
    def produce(self):
        while True:
            if self.queue.empty():
                for image_id, link in self.adapter.load_undownloaded_images(self.NUM_PER_FETCH):
                    self.logger.log("Producer: add new image to crawl:" + image_id + " " + link)
                    self.queue.put((image_id, link))
            time.sleep(10)
            
    def consume(self, process_id):
        while True:
            self.logger.log("Consumer process:" + str(process_id) + " fetch new image from queue")
            if not self.queue.empty():
                image_id, link = self.queue.get()
                self.logger.log("Consumer process:"+ str(process_id) + " start crawling " + str(link))
                image = common_utils.page_crawl(link)
                if image != None:
                    self.logger.log(link + "crawled successfully")
                    self.adapter.store_image(image_id, image)
                else:
                    self.logger.log(link + " failed at crawling")
                    self.adapter.update_image_status(image_id, ImageIndexStatus.DOWNLOAD_FAILED)
                self.queue.task_done()
                time.sleep(1)
            else:
                self.logger.log("Queue empty")
                time.sleep(10)
    
    def run(self):
        producer = Process(target=self.produce)
        producer.start()
        consumers = []
        for i in range(self.NUM_PROCESSES):
            consumer = Process(target=self.consume, args=(i,))
            consumers.append(consumer)
            consumer.start()
        
        for consumer in consumers:
            consumer.join()
        producer.join()
        self.queue.join()
Author: qrodoo-dev; Project: backend; Lines: 51; Source: image_crawler.py

Example 15: __iter__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import task_done [as alias]
	def __iter__(self):
		queue = JoinableQueue(maxsize=params.N_PRODUCERS*2)

		n_batches, job_queue = self.start_producers(queue)

		# Run as consumer (read items from queue, in current thread)
		for x in xrange(n_batches):
			item = queue.get()
			#print len(item[0]), queue.qsize(), "GET"
			yield item
			queue.task_done()

		#queue.join() #Lock until queue is fully done
		queue.close()
		job_queue.close()
Author: StevenReitsma; Project: kaggle-diabetic-retinopathy; Lines: 17; Source: iterators.py


Note: The multiprocessing.JoinableQueue.task_done examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's License. Do not reproduce without permission.