

Python JoinableQueue.empty Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.JoinableQueue.empty. If you have been wondering what JoinableQueue.empty does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples of its containing class, multiprocessing.JoinableQueue.


The 15 code examples of JoinableQueue.empty below are drawn from open-source projects and ordered roughly by popularity.
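Before the project excerpts, here is a minimal self-contained sketch (not taken from any of the projects below) of the typical pattern: a producer fills a JoinableQueue, a worker drains it while empty() reports items remaining, and join() blocks until every item has been matched by a task_done() call. Keep in mind that for multiprocessing queues empty() is only a point-in-time snapshot and can race with concurrent put()/get() calls; several of the examples below work around exactly that.

from multiprocessing import JoinableQueue, Process

def worker(queue):
    # empty() is only a snapshot; this is safe here solely because the
    # queue is fully populated before the worker starts
    while not queue.empty():
        item = queue.get()
        print("processing", item)
        queue.task_done()  # required so queue.join() can return

if __name__ == "__main__":
    queue = JoinableQueue()
    for i in range(5):
        queue.put(i)
    p = Process(target=worker, args=(queue,))
    p.start()
    queue.join()  # returns once every put() has a matching task_done()
    p.join()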

Example 1: __init__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
class ImageCrawler:
    
    NUM_PER_FETCH = 100
    NUM_PROCESSES = 10
    def __init__(self, database_config_path):
        self.queue = JoinableQueue()
        self.logger = Logger("image_crawler")
        self.adapter = ImageStoreAdapter(database_config_path, self.logger)
        
    def produce(self):
        while True:
            if self.queue.empty():
                for image_id, link in self.adapter.load_undownloaded_images(self.NUM_PER_FETCH):
                    self.logger.log("Producer: add new image to crawl:" + image_id + " " + link)
                    self.queue.put((image_id, link))
            time.sleep(10)
            
    def consume(self, process_id):
        while True:
            self.logger.log("Consumer process:" + str(process_id) + " fetch new image from queue")
            if not self.queue.empty():
                image_id, link = self.queue.get()
                self.logger.log("Consumer process:"+ str(process_id) + " start crawling " + str(link))
                image = common_utils.page_crawl(link)
                if image is not None:
                    self.logger.log(link + " crawled successfully")
                    self.adapter.store_image(image_id, image)
                else:
                    self.logger.log(link + " failed at crawling")
                    self.adapter.update_image_status(image_id, ImageIndexStatus.DOWNLOAD_FAILED)
                self.queue.task_done()
                time.sleep(1)
            else:
                self.logger.log("Queue empty")
                time.sleep(10)
    
    def run(self):
        producer = Process(target=self.produce)
        producer.start()
        consumers = []
        for i in range(self.NUM_PROCESSES):
            consumer = Process(target=self.consume, args=(i,))
            consumers.append(consumer)
            consumer.start()
        
        for consumer in consumers:
            consumer.join()
        producer.join()
        self.queue.join()
Author: qrodoo-dev, Project: backend, Lines: 51, Source: image_crawler.py
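A caveat on Example 1: with several consumer processes, the "if not self.queue.empty()" check and the subsequent get() are not atomic, so another consumer may drain the queue in between and leave get() blocked indefinitely. A hedged alternative sketch (not part of the original project; the names reuse the excerpt above) replaces the check with a blocking get and a timeout:

from multiprocessing import JoinableQueue
from queue import Empty  # multiprocessing queues raise queue.Empty on timeout

def consume(queue):
    while True:
        try:
            # blocks for up to 10 s; avoids the empty()/get() race entirely
            image_id, link = queue.get(timeout=10)
        except Empty:
            continue  # queue idle; keep waiting
        try:
            pass  # crawl and store the image here (hypothetical body)
        finally:
            queue.task_done()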

Example 2: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    numToProcess = -1
    scores = pd.DataFrame(columns=['query','fmeasure','precision','recall',
                                   'size','maxDistance','topHits',"contextSteps"])

    print len(datasets)

    for key in datasets:
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}
    distances = {}

    jobs.join()

    dataset_index = collections.defaultdict(set)
    annotated_datasets = set()
    while not result.empty():
        dataset, classes = result.get()
        if len(classes) == 0:
            annotated_datasets.add(dataset)
        for c in classes.keys():
            dataset_index[c].add(dataset)
            owl_class = Class(c, graph=graph)
            for parent in owl_class.parents:
                dataset_index[parent.identifier].add(dataset)
        result.task_done()

    print '\n'
    
    for query, c in queries.items():
        manual = ground_truth[query]
        automated = dataset_index[c]
        hits = manual & automated
        misses = manual - automated
        precision = np.nan if len(automated) == 0 else float(len(hits)) / len(automated)
        recall = np.nan if len(manual) == 0 else float(len(hits)) / len(manual)
        if precision != 0 or recall != 0:
            fmeasure = 0 if np.isnan(precision) or np.isnan(recall) else 2 * (precision * recall) / (precision + recall)
        else:
            fmeasure = 0
        scores = scores.append(dict(query=query, size=len(manual), precision=precision, recall=recall, fmeasure=fmeasure,topHits=topHits, maxDistance=maxDistance, contextSteps = context_steps),
                        ignore_index=True)
        print "Hits for", query, c
        print '\n'.join(sorted(hits))
    print scores
    print "Annotated", len(annotated_datasets), "datasets."
Author: tetherless-world, Project: linkipedia, Lines: 62, Source: dataone_ontology_matching_by_query.py

Example 3: progdev_all

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def progdev_all(boffile, gain):
    """ Initialize all roach boards with boffile and gain settings """
    roachlist = ['rofl%i'%i for i in range(1,16+1)]
    n_roach = len(roachlist)
    
    print "Programming all roaches with %s"%boffile
    print "Gain value: %ix"%gain
    print "Please wait..."
    # Create worker processes and a message queue
    procs = []
    q     = JoinableQueue()
    for i in range(n_roach):
        p = Process(target=progdev_adc16, args=(roachlist[i], q, boffile, gain))
        procs.append(p)
    # Start processes
    for p in procs:
        p.start()
    # Join processes
    for p in procs:
        p.join()
    
    # Print messages
    while not q.empty():
        print q.get()
    print "OK"
Author: jkocz, Project: LEDA, Lines: 27, Source: adc16_initall.py

Example 4: multi_write_selected_pfam_genes

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def multi_write_selected_pfam_genes(options, useful_pfam, annot_genes_all):
    '''
    Run "write_selected_pfam_genes" on multiple threads. 
    '''
    global q
    q = JoinableQueue() 
    for fam in useful_pfam:
        q.put(fam)
    for i in range(options.threads):
        p = Process(target = write_selected_pfam_genes, name = '%i' % (i+1), 
                    args = (options, annot_genes_all))
        p.start()
    sleep(options.threads*0.05)
    q.join()
    sleep(options.threads*0.05)
    if p.is_alive() and q.empty():
        sleep(options.threads*0.2)
        if p.is_alive() and q.empty():
            p.terminate()
Author: dcasbioinfo, Project: sifter-t, Lines: 21, Source: ntaapfam_01.py
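Example 4 decides when to terminate the last worker by polling is_alive() and empty() with sleeps. A common alternative, shown here as a hedged sketch (not from the sifter-t project), is to enqueue one sentinel per worker so that each process exits cleanly on its own:

from multiprocessing import JoinableQueue, Process

SENTINEL = None

def worker(q):
    while True:
        fam = q.get()
        if fam is SENTINEL:
            q.task_done()
            break  # clean exit, no terminate() needed
        # ... process fam here ...
        q.task_done()

def run(families, n_workers=4):
    q = JoinableQueue()
    for fam in families:
        q.put(fam)
    workers = [Process(target=worker, args=(q,)) for _ in range(n_workers)]
    for p in workers:
        p.start()
    for _ in workers:
        q.put(SENTINEL)  # one sentinel per worker
    q.join()
    for p in workers:
        p.join()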

Example 5: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    print len(datasets)
    numToProcess = 10
    scores = pd.DataFrame(columns=['precision','recall','fmeasure',
                                   'numResult','minScore','topHits',
                                   'contentWeight','relationWeight'])
    manual_annotations = get_manual_annotations(numToProcess)
    manual_tuples = get_ir_tuples(manual_annotations)

    for key in manual_annotations.keys():
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}

    jobs.join()

    while not result.empty():
        dataset, classes = result.get()
        automated_annotations[dataset] = classes
        result.task_done()

    automated_tuples = get_ir_tuples(automated_annotations)
    hits = manual_tuples & automated_tuples
    misses = manual_tuples - automated_tuples
    
    precision = float(len(hits)) / len(automated_tuples)
    recall = float(len(hits)) / len(manual_tuples)
    fmeasure = 2 * (precision * recall) / (precision + recall)
    # print '\t'.join([str(x) for x in [precision, recall, fmeasure,
    #                              numResult, minScore, topHits]])
    scores = scores.append(dict(precision=precision, recall=recall, fmeasure=fmeasure,
                                numResult=numResult, minScore=minScore, topHits=topHits,
                                contentWeight=contentWeight, relationWeight=relationWeight),
                        ignore_index=True)

    print scores
Author: tetherless-world, Project: linkipedia, Lines: 51, Source: dataone_entity_linking_eml.py

Example 6: test_basic

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def test_basic():
    in_queue = JoinableQueue()

    mysql_reader = Mysqlio('localhost','3600','test','root','') 
    mysql_reader.scan_and_queue(in_queue,"SELECT * FROM swallow")

    assert in_queue.qsize() == 3

    res = []
    while not in_queue.empty():
        res.append(in_queue.get())

    expected_res = [{'id':1,'libelle':'test'},{'id':2,'libelle':'john'},{'id':3,'libelle':'woo'}]

    assert res == expected_res
Author: GalakFayyar, Project: TabordNG, Lines: 17, Source: test_mysql.py

Example 7: FileReader

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
class FileReader(Process):
    def __init__(self, filename, buffer_size=1000):
        super(FileReader, self).__init__()
        self.filename = filename
        self.que = JoinableQueue(buffer_size)
        self.event = Event()
        self.event.set()
        self.started = Event()
        self.started.clear()

    # It's crucial to call task_done on the queue after the item was processed
    def get_queue(self):
        return self.que

    def get_event(self):
        return self.event

    def is_done(self):
        return not self.event.is_set() and self.que.empty()

    def run(self):
        self.started.set()
        self.proc()
        self.event.clear()

    def proc(self):
        with open_gz(self.filename, encoding='utf-8') as file:
            for line in file:
                self.que.put(line)

    def __iter__(self):
        self.start()
        self.started.wait()
        while not self.is_done():
            try:
                text = self.que.get(timeout=0.1)
                yield text
                self.que.task_done()
            except Empty:
                pass
Author: brmson, Project: synonyms-pccp, Lines: 42, Source: readers.py
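Since Example 7's FileReader drives the queue through __iter__ (starting the process and calling task_done() internally after each yielded line), typical usage is just a for-loop. A hedged usage sketch, with the file name and the handle() function as assumptions:

# hypothetical usage of the FileReader class above
reader = FileReader("corpus.txt.gz", buffer_size=500)
for line in reader:
    handle(line)  # handle() is a placeholder; task_done() happens in __iter__
reader.join()  # FileReader subclasses Process, so join it once iteration ends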

Example 8: __init__

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]

#......... part of the code is omitted here .........
                mintime = self.startTime + random.randint(0, currentTimeWindow)
                maxtime = mintime + 3 * self.singleDay
                print(('Since:', datetime.fromtimestamp(mintime)))
                print(('Until:', datetime.fromtimestamp(maxtime)))
                print(('Previous Users:', len(self.recentUsers)))
                self.loadImageNamesIndex()
                if len(self.allImageNames) > self.max_num_images:
                    print("Max Images reached")
                    break
                # Search Images using the query terms
                for current_tag in range(0, num_queries):
                    dirNumName = self.dbdir.uploadCurrentDirAndGetNext(self.imagesPerDir, self.queryTerms)
                    print(("Current Directory Number: ", dirNumName))
                    #form the query string.
                    query_string = self.queryTerms[current_tag]
                    print(('\n\nquery_string is ' + query_string))
                    #only visit 8 pages max, to try and avoid the dreaded duplicate bug.
                    #8 pages * 250 images  = 2000 images, should be duplicate safe.  Most interesting pictures will be taken.
                    num_visit_pages = 16
                    pagenum = 1
                    while ( pagenum <= num_visit_pages ):
                        if (self.rate_q.qsize()>self.rate_limit):
                            #Age out time stamps older than one hour
                            found_all = False
                            while(not found_all):
                                next_stamp = self.rate_q.get()
                                if time.time() - next_stamp < 3600:
                                    found_all = True
                                    self.rate_q.put(next_stamp)

                            #Wait to age out time stamps if exceeded rate limit
                            if (self.rate_q.qsize()>self.rate_limit):
                                next_stamp = self.rate_q.get()
                                remaining_time = 3600 - (time.time() - next_stamp)
                                time.sleep(remaining_time)
                        self.rate_q.put(time.time()+60)
                        try:
                            rsp = fapi.photos_search(api_key=self.flickrAPIkeys[currentKey], ispublic="1", media="photos",
                                                     per_page=str(self.resultsPerPage), page=str(pagenum),
                                                     sort="interestingness-desc", text=query_string,
                                                     extras="tags, original_format, license, geo, date_taken, date_upload, o_dims, views, description",
                                                     min_upload_date=str(mintime),
                                                     max_upload_date=str(maxtime))
                            fapi.testFailure(rsp)
                        except KeyboardInterrupt:
                            print('Keyboard exception while querying for images, exiting\n')
                            raise
                        except (IOError, SSLError) as e:
                            print(('Error on Flickr photo request:{}\n'.format(e.strerror)))
                        except FlickrExpatError as e:
                            print(('Exception encountered while querying for images: {}\n'.format(e.message)))
                            print(('{}: {} to {} page {}\n'.format(query_string, mintime, maxtime, pagenum)))
                            print((e.xmlstr))

                            #I've identified two possible causes of this error: (1)Bad Gateway and (2)bad unicode characters in xml
                            time.sleep(5) #Waiting is best cure for bad gateway
                            pagenum = pagenum + 1 #Skipping to next page is best cure for bad character

                            #Just in case it has some connection to the rate limit, change the key
                            #Randomly choose flickrAPIkeys and flickrAPIsecrets
                            currentKey = int(math.floor(random.random()*len(self.flickrAPIkeys)))
                            # make a new FlickrAPI instance
                            fapi = FlickrAPI(self.flickrAPIkeys[currentKey], self.flickrAPIsecrets[currentKey])

                            self.flickrerrors += 1
                            if self.flickrerrors > 5:
                                print(("Too many Flickr Expat Errors in {}: Exiting".format(self.category)))
                                exit(1)
                        except Exception as e:
                            print((sys.exc_info()[0]))
                            print('Exception encountered while querying for images\n')
                        else:
                            # Process results
                            if getattr(rsp, 'photos', None):
                                if getattr(rsp.photos[0], 'photo', None):
                                    random.shuffle(rsp.photos[0].photo)
                                    for k in range(0, min(self.downloadsPerQuery, len(rsp.photos[0].photo))):
                                        b = rsp.photos[0].photo[k]
                                        if not self.isDuplicateImage(b):
                                            isDownloadable, url = self.get_url(b, fapi, "Medium 640")
                                            if isDownloadable:
                                                b["url"] = url
                                                self.queue.put((b, dirNumName))
                                    print('Waiting threads')
                                    self.queue.join()
                                    while not self.out_queue.empty():
                                        newImages.append(self.out_queue.get())
                                    print((len(newImages), ' downloaded images'))
                            pagenum = pagenum + 1  #this is in the else exception block.  It won't increment for a failure.
                            num_visit_pages = min(4, int(rsp.photos[0]['pages']))
                            # End While of Pages
                # BEGIN: PROCESS DOWNLOADED IMAGES
                self.updateImageNamesIndex(newImages)
            else:
                if command == "exit":
                    self.do_exit = True
                    print(("Wait for safe exit {}".format(self.category)))

        print('End')
        self.cfg.log(self.homeDir, "CRAWLER STOPPED")
Author: crmauceri, Project: VisualCommonSense, Lines: 104, Source: flickr_threads_toSQL.py

Example 9: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    numToProcess = -1
    scores = pd.DataFrame(columns=['fmeasure','precision','recall',
                                   'numResult','maxDistance','topHits',
                                   'contentWeight','relationWeight', 'hits', "contextSteps"])
    manual_annotations = get_manual_annotations(numToProcess)
    manual_tuples = get_ir_tuples(manual_annotations)

    print len(manual_annotations)
    for i in range(weighted_kmeans_clustering_passes):
        print "Training pass",i+1
        train_kmeans(manual_annotations.keys(), target_class_subtree)
        print "Complete."

    print "Training LSA..."
    # declare the globals before assigning to them (assigning first is a
    # SyntaxWarning in Python 2 and a SyntaxError in Python 3)
    global useLSA, idf, lsa_model, target_classes, targets, target_class_subtree
    lsa_model = train_lsa(manual_annotations)
    useLSA = True
    target_classes, idf, lsa_model = vectorize_ontology(graph, idf, lsa_model)
    subtree = set(graph.transitive_subjects(RDFS.subClassOf, oboe.MeasurementType))
    target_class_subtree = [x for x in target_classes if x.identifier in subtree and x.identifier != oboe.MeasurementType]
    targets = dict([(x.identifier, x) for x in target_class_subtree])
    print "Done."
    
    for key in manual_annotations.keys():
        jobs.put(key)

    processed_count = Counter()
        
    #for i in xrange(NUMBER_OF_PROCESSES):
    #    p = Process(target=work, args=(i, jobs, result, processed_count))
    #    p.daemon = True
    #    p.start()

    work(1, jobs, result, processed_count)

    automated_annotations = {}
    distances = {}

    jobs.join()

    while not result.empty():
        dataset, classes = result.get()
        automated_annotations[dataset] = set(classes.keys())
        distances[dataset] = classes
        result.task_done()

    automated_tuples = get_ir_tuples(automated_annotations)
    hits = manual_tuples & automated_tuples
    misses = manual_tuples - automated_tuples
    precision = float(len(hits)) / len(automated_tuples)
    recall = float(len(hits)) / len(manual_tuples)
    fmeasure = 2 * (precision * recall) / (precision + recall)
    # print '\t'.join([str(x) for x in [precision, recall, fmeasure,
    #                              numResult, minScore, topHits]])
    scores = scores.append(dict(precision=precision, recall=recall, fmeasure=fmeasure, hits=len(manual_tuples),topHits=topHits, maxDistance=maxDistance, contextSteps = context_steps),
                        ignore_index=True)
    print '\n'
    print scores
    results_file = 'results.csv'
    if len(sys.argv) > 1:
        results_file = sys.argv[1]
    hit_curves = csv.writer(open(results_file,'wb'),delimiter=",")
    hit_curves.writerow(['dataset','class','distance','hit'])
    
    for dataset, c in automated_tuples:
        distance = round(distances[dataset][c],3)
        hit = 1 if (dataset,c) in manual_tuples else 0
        hit_curves.writerow([dataset,c,distance,hit])
Author: tetherless-world, Project: linkipedia, Lines: 76, Source: dataone_ontology_matching_lsa.py

Example 10: FileIO

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]

#......... part of the code is omitted here .........
        if self.seed is None:
            # Create random seed
            self.seed = int(1000.0*time.time())

        # Main seed so run can be reproduced
        self.dprint("INFO", "SEED = %d" % self.seed)
        # Flush log file descriptor to make sure above info is not written
        # to all log files when using multiple logs for each subprocess
        self.flush_log()
        stime = time.time()

        if not os.path.exists(self.datadir):
            # Create top level directory if it does not exist
            os.mkdir(self.datadir, 0777)
        self.datadir_st = os.stat(self.datadir)

        if self.nprocs > 1:
            # setup interprocess queue
            self.queue = JoinableQueue()
            processes = []
            for i in xrange(self.nprocs):
                # Run each subprocess with its own process id (tid)
                # The process id is used to set the random number generator
                # and also to have each process work with different files
                process = Process(target=self.run_process, kwargs={'tid':self.tid})
                processes.append(process)
                process.start()
                self.tid += 1
            done = False
            while not done:
                # Wait for a short time so main process does not hog the CPU
                # by checking the queue continuously
                time.sleep(0.1)
                while not self.queue.empty():
                    # Get any pending messages from any of the processes
                    level, msg = self.queue.get()
                    # Check if message is a valid count first
                    if level == "RBYTES":
                        self.rbytes += msg
                    elif level == "WBYTES":
                        self.wbytes += msg
                    elif level == "NOPEN":
                        self.nopen += msg
                    elif level == "NOPENDGR":
                        self.nopendgr += msg
                    elif level == "NOSYNC":
                        self.nosync += msg
                    elif level == "NCLOSE":
                        self.nclose += msg
                    elif level == "NREAD":
                        self.nread += msg
                    elif level == "NWRITE":
                        self.nwrite += msg
                    elif level == "NFSYNC":
                        self.nfsync += msg
                    elif level == "NRENAME":
                        self.nrename += msg
                    elif level == "NREMOVE":
                        self.nremove += msg
                    elif level == "NTRUNC":
                        self.ntrunc += msg
                    elif level == "NFTRUNC":
                        self.nftrunc += msg
                    elif level == "NLINK":
                        self.nlink += msg
                    elif level == "NSLINK":
Author: alhazred, Project: nfstest, Lines: 70, Source: file_io.py

Example 11: FindText

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
class FindText(BaseWorkerCustomer):
    NUM_WORKING_PROCESSES = 2

    def __init__(self, params, *args, **kwargs):
        super(FindText, self).__init__(*args, **kwargs)

        self.path = params.get("path", "/")
        self.text = params.get("text", "")

        self.params = params

        # file queue to be processed by many threads
        self.file_queue = JoinableQueue(maxsize=0)
        self.result_queue = Queue(maxsize=0)
        self.result = []

        self.is_alive = {"status": True}

        self.re_text = re.compile(".*" + fnmatch.translate(self.text)[:-7] + ".*", re.UNICODE | re.IGNORECASE)
        # remove \Z(?ms) from end of result expression

    def run(self):
        try:
            self.preload()
        except Exception as e:
            result = {"error": True, "message": str(e), "traceback": traceback.format_exc()}

            self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
            return

        def worker(re_text, file_queue, result_queue, logger, timeout):
            while int(time.time()) < timeout:
                if not file_queue.empty():
                    f_path = file_queue.get()
                    try:
                        if not is_binary(f_path):
                            mime = mimetypes.guess_type(f_path)[0]

                            # exclude certain MIME types from the search
                            if mime not in ["application/pdf", "application/rar"]:
                                with open(f_path, "rb") as fp:
                                    for line in fp:
                                        try:
                                            line = as_unicode(line)
                                        except UnicodeDecodeError:
                                            charset = chardet.detect(line)
                                            if charset.get("encoding") in ["MacCyrillic"]:
                                                detected = "windows-1251"
                                            else:
                                                detected = charset.get("encoding")

                                            if detected is None:
                                                break
                                            try:
                                                line = str(line, detected, "replace")
                                            except LookupError:
                                                pass

                                        if re_text.match(line) is not None:
                                            result_queue.put(f_path)
                                            # logger.debug("matched file = %s " % f_path)
                                            break

                    except UnicodeDecodeError as unicode_e:
                        logger.error("UnicodeDecodeError %s, %s" % (str(unicode_e), traceback.format_exc()))

                    except IOError as io_e:
                        logger.error("IOError %s, %s" % (str(io_e), traceback.format_exc()))

                    except Exception as other_e:
                        logger.error("Exception %s, %s" % (str(other_e), traceback.format_exc()))
                    finally:
                        file_queue.task_done()
                else:
                    time.sleep(REQUEST_DELAY)

        try:
            self.logger.debug("findText started with timeout = %s" % TIMEOUT_LIMIT)
            time_limit = int(time.time()) + TIMEOUT_LIMIT
            # Launches a number of worker threads to perform operations using the queue of inputs
            for i in range(self.NUM_WORKING_PROCESSES):
                p = Process(
                    target=worker, args=(self.re_text, self.file_queue, self.result_queue, self.logger, time_limit)
                )
                p.start()
                proc = psutil.Process(p.pid)
                proc.ionice(psutil.IOPRIO_CLASS_IDLE)
                proc.nice(20)
                self.logger.debug(
                    "Search worker #%s, set ionice = idle and nice = 20 for pid %s" % (str(i), str(p.pid))
                )
                self.processes.append(p)

            abs_path = self.get_abs_path(self.path)
            self.logger.debug("FM FindText worker run(), abs_path = %s" % abs_path)

            if not os.path.exists(abs_path):
                raise Exception("Provided path not exist")

            self.on_running(self.status_id, pid=self.pid, pname=self.name)
#......... part of the code is omitted here .........
Author: LTD-Beget, Project: sprutio-rpc, Lines: 103, Source: findText.py

Example 12: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main(fileName):

    # load test config and default values
    with open(fileName, 'r') as f:
        tests = json.load(f)
    prepare_tests_settings(tests)
    default = tests['default']

    jobQueue = JoinableQueue()
    resultQueue = JoinableQueue()

    # NOTE: some parameters are obsolete as they are overruled by the parameters in individual tests
    if default['browser'].lower() == 'chrome':
        # use producer-consumer mode for chrome
        # this mode helps isolating individual failures
        # as well as supporting parallel browsers
        workers = start_parallel_instances(default, jobQueue, resultQueue)
        dispatch_parallel_tests(tests, jobQueue)

        def terminate_jobs(_, __):
            logging.warning("SIGINT: terminating all the intances ")
            for worker in workers:
                # SIGTERM will trigger teardown function of the workers
                # so that they could nicely kill the processes (chrome, Xvfb) they started
                os.kill(worker.pid, signal.SIGTERM)
                time.sleep(0.5)
            sys.exit(-1)
        # SIGINT is for nice teardown
        # NOTE: if SIGKILL this process, there could be orphan processes that stop new tests
        # one must manually kill them if that happens
        signal.signal(signal.SIGINT, terminate_jobs)
        #loader = ChromeLoader(disable_quic=default['disable_quic'], disable_spdy=default['disable_spdy'],
        #                      check_protocol_availability=False, save_packet_capture=True,
        #                      log_ssl_keys=default['log_ssl_keys'], save_har=True, disable_local_cache=False,
        #                      headless=default['headless'], ignore_certificate_errors=default['ignore_certificate_errors'])
        #loader.load_pages(tests)
        #pprint.pprint(dict(loader.load_results))

        # then wait for the queue to be empty
        jobQueue.join()

        while not resultQueue.empty():
            # print all the test reports
            result = resultQueue.get(False)
            print result
            resultQueue.task_done()
        # send teardown message then wait
        teardown_parallel_instances(default, jobQueue)
        jobQueue.join()

    elif default['browser'].lower() == 'firefox':
        # simpler single-threaded mode for Firefox
        loader = FirefoxLoader(disable_quic=default['disable_quic'], disable_spdy=default['disable_spdy'],
                               check_protocol_availability=False, save_packet_capture=True,
                               log_ssl_keys=default['log_ssl_keys'], save_har=True, disable_local_cache=False,
                               headless=default['headless'], ignore_certificate_errors=default['ignore_certificate_errors'])
        loader.load_pages(tests)
        pprint.pprint(dict(loader.load_results))
    else:
        logging.critical('Unknown browser %s', default['browser'].lower())
        sys.exit(-1)
Author: eaufavor, Project: chrome-webpage-profiler, Lines: 63, Source: test_driver.py

Example 13: JoinableQueue

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
    queue_log = JoinableQueue()
    test_commprocess = CommProcess(port=8080,address='127.0.0.1',events={'enable_comms':event_enable_comms,'client_disconnect':event_client_disconnect},queues={'tx_msg':queue_tx,'rx_msg':queue_rx,'log':queue_log},debug_log=debug_log_path)

    retcode = 0
    
    try:
        
        event_enable_comms.set()
        test_commprocess.start()

        while True:
          
                
            if not queue_log.empty():
                print pop_queue(queue_log)
                
            if not queue_rx.empty():
                msg = pop_queue(queue_rx)
                print msg
                queue_tx.put("echoing %s\n"%msg)

            ## check if client disconnected :
            if event_client_disconnect.wait(PROCESS_EVENT_CLIENT_DISCONNECT_TIMEOUT_S):
                

                ## check if the commprocess is still alive :
                if not test_commprocess.is_alive():
                    ## what happened ?
                    commprocess_retcode = test_commprocess.exitcode
Author: bcare, Project: roverpi, Lines: 33, Source: commctrl.py

Example 14: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    numToProcess = -1
    scores = pd.DataFrame(columns=['fmeasure','precision','recall',
                                   'numResult','maxDistance','topHits', 'hits', "contextSteps"])
    manual_annotations = get_manual_annotations(numToProcess)
    manual_tuples = get_ir_tuples(manual_annotations)

    print len(manual_annotations)
    for i in range(weighted_kmeans_clustering_passes):
        print "Training pass",i+1
        train_kmeans(manual_annotations.keys(), target_class_subtree)
        print "Complete."

    for key in manual_annotations.keys():
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}
    distances = {}

    jobs.join()

    while not result.empty():
        dataset, classes = result.get()
        automated_annotations[dataset] = set(classes.keys())
        distances[dataset] = classes
        result.task_done()

    automated_tuples = get_ir_tuples(automated_annotations)
    hits = manual_tuples & automated_tuples
    misses = manual_tuples - automated_tuples
    precision = float(len(hits)) / len(automated_tuples)
    recall = float(len(hits)) / len(manual_tuples)
    fmeasure = 2 * (precision * recall) / (precision + recall)
    # print '\t'.join([str(x) for x in [precision, recall, fmeasure,
    #                              numResult, minScore, topHits]])
    scores = scores.append(dict(precision=precision, recall=recall, fmeasure=fmeasure, hits=len(manual_tuples),topHits=topHits, maxDistance=maxDistance, contextSteps = context_steps),
                        ignore_index=True)
    print '\n'
    print scores
    results_file = 'results.csv'
    if len(sys.argv) > 1:
        results_file = sys.argv[1]
    hit_curves = csv.writer(open(results_file,'wb'),delimiter=",")
    hit_curves.writerow(['dataset','class','distance','hit'])
    
    for dataset, c in automated_tuples:
        distance = round(distances[dataset][c],3)
        hit = 1 if (dataset,c) in manual_tuples else 0
        hit_curves.writerow([dataset,c,distance,hit])
Author: tetherless-world, Project: linkipedia, Lines: 64, Source: dataone_ontology_matching.py

Example 15: main

# Required import: from multiprocessing import JoinableQueue [as alias]
# Or: from multiprocessing.JoinableQueue import empty [as alias]
def main():
    global L2C_BEGIN, L2C_END, L2C_DELTA, L2G_BEGIN, L2G_END, L2G_DELTA
    global NFOLDS, ADD_ARGS, SVM_TRAIN, TRAIN_DATA, N_PER_SSH
    
    parser = OptionParser(
        usage="usage: %prog [options] <dataset> <gridscore-file>")
    parser.add_option("--log2c", dest="log2c", metavar="BEGIN END STEP",
        type='float', nargs=3, default=(L2C_BEGIN, L2C_END, L2C_DELTA),
        help="log2 of C SVM contraint [default: %default]")
    parser.add_option("--log2g", dest="log2g", metavar="BEGIN END STEP",
        type='float', nargs=3, default=(L2G_BEGIN, L2G_END, L2G_DELTA),
        help="log2 of G SVM contraint [default: %default]")
    parser.add_option("-v", "--fold", dest="fold", metavar="FOLD",
        type='int', default=NFOLDS,
        help="number of cross validation folds [default: %default]")
    parser.add_option("-a", "--args", dest="args", metavar="ARGS",
        type='string', default=ADD_ARGS,
        help="additional arguments to the SVM trainer [default: %default]")
    parser.add_option("--svm-train", dest="svm_train", metavar="PATHNAME",
        type='string', default=SVM_TRAIN,
        help="path of SVM trainer [default: %default]")
    (options, args) = parser.parse_args()
    if len(args) != 2:
        parser.print_usage(file=sys.stderr)
        return 1

    L2C_BEGIN, L2C_END, L2C_DELTA = options.log2c
    L2G_BEGIN, L2G_END, L2G_DELTA = options.log2g
    NFOLDS = options.fold
    ADD_ARGS = options.args
    SVM_TRAIN = options.svm_train
    TRAIN_DATA, outfile = args
    
    job_queue = Queue()
    result_queue = Queue()

    for log2c, log2g in product(
            frange(L2C_BEGIN, L2C_END, L2C_DELTA),
            frange(L2G_BEGIN, L2G_END, L2G_DELTA)):
        job_queue.put((log2c, log2g))

    for i in range(LOCAL_WORKERS):
        LocalWorker('local-%d' % i, job_queue, result_queue).start()
    
    for i, host in enumerate(SSH_WORKERS):
        for j in range(N_PER_SSH):
            SSHWorker('ssh-%d/%d' % (i, j), 
                host, job_queue, result_queue).start()

    #block until all jobs are done
    job_queue.join()
    
    result = []
    while not result_queue.empty():
        result.append(result_queue.get())
    result = sorted(result, key=op.itemgetter(3,1,2), reverse=True)

    _, best_log2c, best_log2g, best_score = max(result, key=op.itemgetter(3,1,2))

    with open(outfile, 'w') as ofp:
        ofp.write("#best result: log2c=%f, log2g=%f, score=%f\n" % \
            (best_log2c, best_log2g, best_score))
        ofp.write("#log2(c)\tlog2(g)\tscore\n")
        for (name, log2c, log2g, score) in result:
            ofp.write("%f\t%f\t%f\n" % (log2c, log2g, score))

    return 0
Author: Dryuna, Project: svm-tools, Lines: 69, Source: grid-search.py


Note: The multiprocessing.JoinableQueue.empty examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, refer to each project's license; do not repost without permission.