

Python Manager.empty Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.Manager.empty. If you are wondering what Manager.empty does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the other usage examples for multiprocessing.Manager, the class this method belongs to.


Eleven code examples of Manager.empty are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
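
Before the examples, a minimal sketch of the pattern they all share may help. Manager().Queue() returns a proxy for a queue that lives in a separate manager process, so it can be handed to child processes, and the parent drains it with empty()/get() once every producer has been joined. This sketch is illustrative only; the worker function and process count are not taken from any of the projects below.

from multiprocessing import Manager, Process

def worker(q, n):
    # each worker reports one result through the shared manager queue
    q.put(n * n)

if __name__ == '__main__':
    q = Manager().Queue()   # a proxy object, safe to pass to child processes
    procs = [Process(target=worker, args=(q, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()            # all producers are done, so empty() is now reliable
    results = []
    while not q.empty():    # the drain idiom used throughout the examples below
        results.append(q.get())
    print(sorted(results))  # [0, 1, 4, 9]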

Example 1: upload_test

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
 def upload_test(self):
     start_time = time.time()
     q = Manager().Queue()
     plist = []
     for i in range(self.upload_user):
         proc = Process(target=self.upload_one_user, args=(q,))
         plist.append(proc)
     for proc in plist:
         proc.start()
     for proc in plist:
         proc.join()
     while True:
         if q.empty():
             break
         else:
             if q.get() == 0:
                 self.upload_success += 1
             else:
                 self.upload_fail += 1
     use_time = time.time() - start_time
     table = PrettyTable(["key", "value"])
     table.add_row(["One File Size (M)", self.upload_file_size])
     table.add_row(["All File Size (M)", self.upload_file_size * self.upload_number * self.upload_user])
     table.add_row(["Process Count(user)", self.upload_user])
     table.add_row(["Upload Count", self.upload_number * self.upload_user])
     table.add_row(["Interval Time(s)", self.upload_time])
     table.add_row(["Success count", self.upload_success])
     table.add_row(["Fail count", self.upload_fail])
     table.add_row(["Success ratio (%)",
                    (round(self.upload_success / float(self.upload_number * self.upload_user), 4) * 100)])
     table.add_row(["Use time (s)", "%.2f" % use_time])
     print table
Developer: w4n9H, Project: pyfdfs, Lines: 34, Source: upload_test.py
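
Example 1 joins every producer before draining, so its empty()-then-get() sequence cannot race. When producers may still be running, that pair of calls is not atomic: the queue can become empty between the check and the get(). One alternative is a non-blocking get_nowait(), catching the queue.Empty exception that the manager proxy re-raises from the underlying queue.Queue. A sketch of that idiom, not part of the original project:

import queue  # stdlib module; provides the Empty exception

def drain(q):
    # drain a Manager().Queue() without the separate empty() check
    results = []
    while True:
        try:
            results.append(q.get_nowait())  # atomic check-and-take
        except queue.Empty:
            break  # nothing left (or nothing yet, if producers still run)
    return results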

Example 2: main

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
def main():
    arg_parser = argparse.ArgumentParser(description='bbd compressing program')
    arg_parser.add_argument('-compress_from_dir', type=str, default='.',
                            help='directory where needs to be compressed')
    arg_parser.add_argument('-compress_to_dir', type=str, default='.',
                            help='directory where puts compressed file')
    arg_parser.add_argument('-compress_method', default='bz2', choices=['bz2', 'gz'],
                            help='the method of compressing, '
                                 'support bz2 and gz, bz2 is default')
    arg_parser.add_argument('-compress_dir_match', default=None,
                            help='regular expressions what matches '
                                 'which directories can be compressed')
    arg_parser.add_argument('-compress_file_match', default=None,
                            help='regular expressions what matches '
                                 'which files can be compressed')

    args = arg_parser.parse_args()
    kwargs = dict()
    kwargs['compress_from_dir'] = os.path.abspath(args.compress_from_dir)
    kwargs['compress_to_dir'] = os.path.abspath(args.compress_to_dir)
    kwargs['compress_method'] = args.compress_method
    kwargs['compress_dir_match'] = args.compress_dir_match
    kwargs['compress_file_match'] = args.compress_file_match
    print('Operating parameters are as follows:')
    print('\t' + '\n\t'.join(['{}: {}'.format(k, v) for k, v in kwargs.items()]))

    if check_compress_proc_is_alive():
        return

    if kwargs['compress_from_dir'] == kwargs['compress_to_dir']:
        print(kwargs['compress_from_dir'], kwargs['compress_to_dir'])
        compress_to_dir = os.path.join(kwargs['compress_to_dir'], 'flume_compressed_data')
        kwargs['compress_to_dir'] = compress_to_dir
        os.makedirs(compress_to_dir, exist_ok=True)

    max_worker = cpu_count() if cpu_count() <= 8 else 8
    pool_cls = Pool
    compressed_queue = Manager().Queue()
    print('using multi processes to compress files')

    path_mgr = PathUtils(**kwargs)
    # kwargs has no 'target_dir' key; the destination root is compress_to_dir
    compressed_data_dir = Path(kwargs['compress_to_dir']) / 'bbd_compressed_data_dir'
    compress_method = kwargs['compress_method']
    for file_path in path_mgr.match_need_compress_files():
        from_path = str(file_path.absolute())
        to_path = str((compressed_data_dir / file_path.name).absolute())
        compressed_queue.put((from_path, to_path, compress_method))

    if compressed_queue.empty():
        print('there is no file need to be compressed, waiting for next checking')
        return

    multi_workers(max_worker=max_worker, pool_cls=pool_cls, work=compress_file,
                  compressed_queue=compressed_queue)
Developer: LooEv, Project: learning-python, Lines: 56, Source: sftp.py
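
Example 2 fills the task queue up front and checks empty() once before dispatching workers, which is safe because nothing is consuming yet. When consumers run while the queue is still being fed, a common alternative to polling empty() is a sentinel value that tells each worker to stop. A minimal Python 3 sketch; compress_worker and SENTINEL are hypothetical names, not from the project above:

from multiprocessing import Manager, Process

SENTINEL = None

def compress_worker(task_q):
    while True:
        task = task_q.get()   # blocks; no empty() polling needed
        if task is SENTINEL:
            break             # orderly shutdown
        print('compressing %s -> %s' % task)

if __name__ == '__main__':
    q = Manager().Queue()
    for t in [('a.log', 'a.log.bz2'), ('b.log', 'b.log.bz2')]:
        q.put(t)
    workers = [Process(target=compress_worker, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for _ in workers:
        q.put(SENTINEL)       # one sentinel per worker
    for w in workers:
        w.join()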

Example 3: upload_begin

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
 def upload_begin(self):
     plist = []
     q = Manager().Queue()
     with open(self.list_path, 'r') as fp:
         for i in fp:
             if not i:
                 break
             md5_crc32 = i.strip()[:41]
             if md5_crc32 not in self.tmp_list and len(md5_crc32) == 41:
                 self.tmp_list.append(md5_crc32)
                 self.upload_num += 1
     print self.upload_num
     for md5_crc32_list in self.chunks(self.tmp_list, self.work_count):
         proc = Process(target=self.upload_file, args=(q, md5_crc32_list,))
         plist.append(proc)
     for proc in plist:
         proc.start()
     for proc in plist:
         proc.join()
     while True:
         if q.empty():
             break
         else:
             r = q.get()
             if r == 0:
                 self.success += 1
             elif r == 1:
                 self.fail += 1
             elif r == 2:
                 self.download_fail += 1
             else:
                 pass
     use_time = time.time() - self.start_time
     table = PrettyTable(["key", "value"])
     table.add_row(["Upload Count", len(set(self.tmp_list))])
     table.add_row(["Success count", self.success])
     table.add_row(["Fail count", self.fail])
     table.add_row(["Download Fail", self.download_fail])
     table.add_row(["Use time (s)", "%.2f" % use_time])
     print table
Developer: w4n9H, Project: pyfdfs, Lines: 42, Source: upload_test.py

Example 4: startServer

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
def startServer(host, port, options):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    s.bind((host, port))
    s.listen(0)

    queue = Manager().Queue()
    while True:
        print "main: waiting for connection"
        conn, addr = s.accept()
        print 'main: Connected by', addr

        data = conn.recv(1024)
        print 'received port request'
        p = Process(target = serverNewClient, args = (queue, options, ))
        p.start()
        while queue.empty():
            time.sleep(0.05)
            print "queue is still empty"
        port = queue.get()
        conn.sendall(str(port) + '\r\n')
        print "assigned port %d to new client" % port
Developer: cmusatyalab, Project: quiltview, Lines: 25, Source: proxy_server.py
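
The while queue.empty(): time.sleep(0.05) loop above is a busy-wait. Because Manager().Queue() proxies a blocking queue.Queue, the same handshake can usually be a single blocking get() with a timeout: the proxy forwards the timeout argument and re-raises queue.Empty on expiry. A Python 3 sketch under that assumption; wait_for_port is a hypothetical helper, and the queue is named q to avoid shadowing the stdlib queue module:

import queue

def wait_for_port(q, conn, timeout=10):
    # blocking get() replaces the empty()/sleep() polling loop
    try:
        port = q.get(timeout=timeout)  # wakes as soon as the child puts the port
    except queue.Empty:
        conn.sendall(b'error: no port assigned\r\n')
    else:
        conn.sendall(('%d\r\n' % port).encode())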

Example 5: launch_expeditions

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
    def launch_expeditions( self , task_request_list , moon_name_list=None ):
        
        global expedition
        
        # ---[ 1 ]------------------------------------------------------
        
        self.log.show( 'Checking Moon list sent by user' )
        
        working_moons = []
        
        if not moon_name_list :
            
            self.log.show( 'Traveling to available Moons on Orbit' )
            
            working_moons = self.orbit.values()
            
        else :
            
            self.log.show( 'Traveling to ' + str( moon_name_list ) )
            
            working_moons = [ self.orbit.get_moon( moon_name ) for moon_name in moon_name_list ]
            
        # ---[ 2 ]------------------------------------------------------
        
        self.log.show( 'Build Thread-safe Queues with no maximum size' )
        
        recv_queue = Manager().Queue()  # maxsize could be len(task_request_list)
        
        send_queue = Manager().Queue()  # maxsize could be len(task_request_list)

        # ---[ 3 ]------------------------------------------------------
        
        self.log.show( 'Enqueue tasks on "send_queue" object' )
        
        for task_obj in task_request_list : 
            
            send_queue.put_nowait( str(task_obj) ) # "Normal" objects are not thread-safe!
            
        self.log.show( 'send_queue = ' + str(send_queue.qsize()) + '/' + str(len(task_request_list)) + ' tasks' )
        
        # ---[ 4 ]------------------------------------------------------
        
        self.log.show( 'Starting up Process Pool' )
                
        pool = Pool(processes=len(working_moons))

        

        for moon in working_moons :
            
            #running_expeditions.append( Process( target=expedition , args=(self.name , moon.name , moon.ip , moon.port , taskrequest_queue , taskresponse_queue, ) ) ) # Process Object
            pool.apply_async( func=expedition , args=(self.name , moon.name , moon.ip , moon.port , send_queue , recv_queue , ) )

        # ---[ 5 ]------------------------------------------------------
        
        pool.close()
        pool.join()
        
        self.log.show( 'recv_queue = ' + str(recv_queue.qsize()) + '/' + str(len(task_request_list)) + ' tasks' )
        
        tmp = []
        while not recv_queue.empty() :
            
            tmp.append( recv_queue.get() )
            
        self.log.show( 'closing queue' )
        
        self.log.show( 'return results' )
        
        return tmp
Developer: vyscond, Project: cosmos, Lines: 72, Source: cosmos.py
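
A detail worth noting in example 5: a plain multiprocessing.Queue cannot be passed as an argument to Pool.apply_async; it is meant to be inherited at fork time, and pickling it raises RuntimeError. A Manager().Queue() proxy, by contrast, pickles cleanly and can be shipped to pool workers, which is presumably why the example uses one. A reduced sketch of the same structure, with expedition_stub standing in for the real expedition function:

from multiprocessing import Manager, Pool

def expedition_stub(name, send_q, recv_q):
    # consume one task and report one result, like a one-shot expedition
    recv_q.put('%s handled %s' % (name, send_q.get()))

if __name__ == '__main__':
    mgr = Manager()
    send_q, recv_q = mgr.Queue(), mgr.Queue()   # proxies: picklable, pool-safe
    send_q.put('task-1')
    pool = Pool(processes=1)
    pool.apply_async(expedition_stub, ('moon-a', send_q, recv_q))
    pool.close()
    pool.join()
    print(recv_q.get())   # 'moon-a handled task-1'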

Example 6: main

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
def main():
    import argparse
    import logging
    import os
    import yaml

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    parser.add_argument('videolist', help='A file listed all the videos to be indexed')
    parser.add_argument('cores', type=int, help='Number of processes of paralellism')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s - %(message)s")

    classifier = zipfile.ZipFile(args.classifier)
    global forest0, svmmodels, training_bosts, hist0
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()

    KEY_FRAME_PERIOD = 2 # in seconds
    #queue = Queue.Queue()
    #data_queue = Queue.Queue()
    queue = Manager().Queue()
    data_queue = Manager().Queue()

    for processes in [4]:    
        video_list = open(args.videolist, 'r')
        log_file = open('statistics%d.txt' % processes, 'w')

        fps = 0
        fps_count = 0

        for video_file in video_list:
            video_file = video_file.strip()
            name = os.path.splitext(video_file)[0]
            file_path = os.path.join(VIDEO_RESOURCE, video_file)
            log_file.write(file_path+"\n")

            capture = cv.CaptureFromFile(file_path)
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            log_file.write("frame rate: %.3f, total frames: %d\n" % (frame_rate, total_frames)) 

            start_time0 = time.time()
            key_frame_counter = 0    
            frame = cv.QueryFrame(capture)
            os.makedirs("tmp")
            while frame:
                cv.SaveImage("tmp/" + name + "%d.png" % key_frame_counter, frame)
                for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                    frame = cv.QueryFrame(capture)
                key_frame_counter += 1
            for i in xrange(key_frame_counter):
                 data_queue.put(i)

            start_time = time.time()

            ps = []
            for group in xrange(processes):
                p = Process(target = calculate_class, args=(name, queue, data_queue, ))
                #p = threading.Thread(target = calculate_class, args=(name, queue, data_queue, ))
                p.start()
                ps.append(p)
            for p in ps:
                p.join()

            elapse_time = time.time() - start_time

            accuracy_file = open('360.txt', 'w')
            while not queue.empty():
                q_entry = queue.get()
                frame_counter = q_entry[0]
                ILP = q_entry[1]
                accuracy_file.write('%d' % frame_counter)
                for class_index, score in enumerate(ILP):
                    accuracy_file.write(',%.02f' % score)
                accuracy_file.write('\n')
            accuracy_file.close()

            os.system("rm -rf tmp")

            log_file.write("decoding time: %.2f, total time: %.2f, key frames: %d, frame per sec: %.3f\n" \
                % (start_time - start_time0, elapse_time, key_frame_counter, key_frame_counter / elapse_time))
            fps += key_frame_counter / elapse_time
            fps_count += 1

            #time.sleep(10)

        video_list.close()
        log_file.write("average fps: %.3f\n" % (fps/fps_count))
        log_file.close()
Developer: cmusatyalab, Project: GigaSight, Lines: 97, Source: indexer_multi.py

Example 7: main

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
def main():
    import argparse
    import logging
    import os
    import yaml
    import cv

    global processes
    global forest0, svmmodels, training_bosts, hist0

    parser = argparse.ArgumentParser()
    parser.add_argument('classifier')
    parser.add_argument('cores', type=int, help='Number of processes of paralellism')
    parser.add_argument('--postprocess', action="store_true",
                        help='Run postprocessing, close blobs and remove noise')
    args = parser.parse_args()

    logging.basicConfig(level=logging.WARNING,
                        format="%(asctime)s - %(message)s")

    classifier = zipfile.ZipFile(args.classifier)
    forest0, hist0, forest1, hist1, training_bosts, svmmodels, prior = \
        load_from_classifier(classifier)
    classifier.close()
    
    processes = args.cores
    pool = Pool(processes = processes)

    KEY_FRAME_PERIOD = 2 # in seconds
    q = Manager().Queue()
    total_frame = 0

    new_flag = True
    while True:
        if not new_flag:
            print "wait..."
            time.sleep(1)
        stream_list = get_list(CLOUDLET_RESOURCE, STREAM_RESOURCE)
        new_flag = False
        prev_stream = None
        for stream in stream_list:
            if stream.get("stream_description").find("denatured") == -1 or stream.get("stream_description").find("video") == -1 or stream.get("stream_description").find("pstf") != -1:
                prev_stream = stream
                continue
            ILP_max = [] 
            for i in xrange(len(CLASSES)):
                ILP_max.append(0)
            ILP_list = []
            for i in xrange(len(CLASSES)):
                ILP_list.append([])
            path, name = stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path, name)
            path_p, name_p = prev_stream.get("path").replace("mnt", "cloudletstore").rsplit('/', 1)
            print os.path.join(path_p, name_p)
            statinfo = os.stat(os.path.join(path_p, name_p))      
            prev_stream = stream
           
            if statinfo.st_size == 0:
                continue

            new_flag = True
            frame_rate = 30
     
            capture = cv.CaptureFromFile(os.path.join(path, name))
            frame_rate = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
            total_frames = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
            frame = cv.QueryFrame(capture)
            print frame_rate, total_frames
            print capture
   
            start_time = time.time()
            
            key_frame_counter_base = 0    
            while frame:
                process_num = 0
                while frame:
                    cv.SaveImage("indexing" + "%d.png" % process_num, frame)
                    for i in xrange(int(KEY_FRAME_PERIOD * frame_rate)):
                        frame = cv.QueryFrame(capture)
                    process_num += 1
                    if process_num == processes:
                        break
                pool.map(calculate_class, [(q, x) for x in xrange(key_frame_counter_base, key_frame_counter_base + process_num)])
          
                # drain this batch's results before decoding the next one;
                # the counter advances once per batch, after the drain loop
                while not q.empty():
                    q_entry = q.get()
                    key_frame_counter = q_entry[0]
                    ILP = q_entry[1]
                    for class_index, score in enumerate(ILP):
                        if score > SCORE_THRESHOLD:
                            ILP_list[class_index].append((key_frame_counter * int(KEY_FRAME_PERIOD * frame_rate) + 1, score))
                            print (CLASSES[class_index], "%.02f" % score),
                        if score > ILP_max[class_index]:
                            ILP_max[class_index] = score
                    print

                key_frame_counter_base += process_num

            for class_index, frame_list in enumerate(ILP_list):
                if not frame_list:
#......... some of the code is omitted here .........
Developer: cmusatyalab, Project: GigaSight, Lines: 103, Source: indexer_pool.py

Example 8: MPResult

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
class MPResult(object):
    """
    Sync result between processes
    """

    MATCH = {}  # id -> instance

    def __init__(self, result):
        from multiprocessing import Manager

        # Test result instance
        self.result = result

        # Result queue
        self.queue = Manager().Queue()

    def __getattr__(self, item):
        return getattr(self.result, item)

    @staticmethod
    def pack_result_storage(storage):
        """
        Pack result from storage
        """
        return [(get_master_id(s[0]), s[1]) for s in storage]

    def unpack_result_storage(self, storage):
        """
        Unpack result from storage
        """
        unpack_storage = []

        for master_id, message in storage:
            unpack_storage.append(
                (self.MATCH[master_id], message),
            )

        return unpack_storage

    def match(self, suite):
        """
        Match id of master process to instance
        """
        self.MATCH[get_suite_master_id(suite)] = suite

        def match(s):
            for o in s:
                if isinstance(o, BaseSuite):
                    self.MATCH[get_suite_master_id(o)] = o
                    match(o)
                else:
                    self.MATCH[get_case_master_id(o)] = o

        match(suite)

    def save_result(self):
        """
        Save result in queue
        """
        self.queue.put(
            (
                (
                    self.pack_result_storage(self.result.errors),
                    self.pack_result_storage(self.result.skipped),
                    self.pack_result_storage(self.result.failures),
                ),
                self.result.testsRun,
            ),
        )

    def make_result(self):
        """
        Merge result from queue to result instance
        """
        while not self.queue.empty():
            (errors, skipped, failures), run_tests = self.queue.get()

            self.result.errors.extend(self.unpack_result_storage(errors))
            self.result.skipped.extend(self.unpack_result_storage(skipped))
            self.result.failures.extend(self.unpack_result_storage(failures))

            self.result.testsRun += run_tests
Developer: allavlasova, Project: task1, Lines: 84, Source: multiprocessing.py
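
MPResult ships plain (master id, message) tuples through the manager queue and then maps the ids back onto the parent's own suite and case instances, so object identity survives the round trip even though the objects themselves never cross process boundaries. A reduced sketch of that ship-summaries-then-merge pattern; run_suite and its payload are hypothetical:

from multiprocessing import Manager, Process

def run_suite(result_q, suite_id):
    # workers send small picklable summaries, never live result objects
    result_q.put((suite_id, {'testsRun': 3, 'failures': []}))

if __name__ == '__main__':
    q = Manager().Queue()
    workers = [Process(target=run_suite, args=(q, i)) for i in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    total = 0
    while not q.empty():   # safe: every producer has been joined
        _suite_id, summary = q.get()
        total += summary['testsRun']
    print(total)           # 6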

Example 9: Manager()

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
    FPS_clock = pygame.time.Clock()
    game_state = state.GameState()
    game_gui = gui.GUI(game_state)
    game_event_handler = event_handler.EventLogic(game_state, game_gui)
    game_gui.add_handler(game_event_handler)
    game_gui.draw(game_state.get_state())
    pygame.display.update()
    commandQueue = Manager().Queue()
    listeningProcess = Process(target=voice_listener, args=(game_event_handler, commandQueue,))
    while True:
        game_gui.draw(game_state.get_state())
        game_event_handler.event_handler()
        if game_state.get_state() == "SSH season voice mode" or game_state.get_state() == "Web season voice mode":
            if not game_event_handler.queue.empty():
                val = game_event_handler.queue.get()
                if val:
                    listeningProcess.start()
                else:
                    listeningProcess.terminate()
                    listeningProcess.join()
                    listeningProcess = Process(target=voice_listener, args=(game_event_handler, commandQueue,))
            if not commandQueue.empty():
                voice_command = commandQueue.get()
                try:
                    game_event_handler.pipi.say(voice_command %
                                                game_gui.bool_to_text[str(game_gui.light_to_string[voice_command])])
                except KeyError:
                    pass
        pygame.display.update()
        FPS_clock.tick(30)
Developer: sontung, Project: rPi-controller, Lines: 32, Source: main.py

Example 10: range

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
# Set up Processes
number_of_processes = 16
for i in range(number_of_processes):
    worker = MD5Cracker(work_queue, global_namespace)
    worker.start()
    workers.append(worker)

print "Target Hash: {}".format(hash)

maxChars = 13
while_count = 1
for baseWidth in range(1, maxChars + 1):

    while global_namespace.finished is False:
        if work_queue.empty():
            print "checking passwords width [" + `baseWidth` + "]"

            # set is width, position, baseString
            work_queue.put({'width': baseWidth, 'position': 0, 'baseString': ""})
            break
        else:

            if while_count % 10 == 0:
                global_namespace.count = 0
                while_count = 1
            else:
                print "{:,d} passwords/sec".format(global_namespace.count/while_count)
                while_count += 1

            print "Queue Size: {}".format(work_queue.qsize())
Developer: Hengjie, Project: High-Performance-MD5-Bruteforcer, Lines: 32, Source: md5cracker.py
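
Example 10 polls empty() as a refill signal and qsize() for progress reporting on work_queue, whose construction is elided above (given this page's topic, presumably a Manager().Queue()). Both calls are dependable on a manager proxy because it fronts a regular queue.Queue living in the manager process; on a plain multiprocessing.Queue they are weaker, and qsize() can even raise NotImplementedError on macOS, where sem_getvalue() is not implemented. A small comparison sketch:

from multiprocessing import Manager, Queue

if __name__ == '__main__':
    mq = Manager().Queue()   # proxy for queue.Queue in the manager process
    mq.put('x')
    print(mq.qsize())        # 1 -- works on every platform

    nq = Queue()             # pipe-and-semaphore based queue
    nq.put('x')
    # nq.qsize() may raise NotImplementedError on macOS (no sem_getvalue),
    # one reason to prefer the manager proxy when qsize()/empty() matter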

Example 11: Queue_server

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import empty [as alias]
class Queue_server(object):
    
    '''
    Initialize the official-account (公众号) queue.
    @param tuple wx_lists: list of official accounts
    '''
    def __init__(self, wx_lists=()):
        self.__queue = Manager().Queue(-1)
        self.init_wx_lists(wx_lists)
        self.__fail_list = Manager().list()
    '''
    Load the queue from a list of official accounts.
    @param tuple wx_lists: list of official accounts
    '''
    def init_wx_lists(self, wx_lists=()):
        for wx in wx_lists:
            self.put(wx)
    '''
    Put an element on the queue.
    @param mixed value: the element to add
    '''
    def put(self, value):
        self.__queue.put(value)
    
    '''
    Pop an element off the queue.
    @return mixed
    '''
    def get(self):
        if not self.empty():
            return self.__queue.get()
        return False
    
    '''
    Get the underlying queue object.
    @return mixed
    '''
    def get_wx_lists_queue(self):
        return self.__queue
    
    '''
    Get the queue size.
    @return int
    '''
    def get_size(self):
        return self.__queue.qsize()
    
    '''
    Whether the queue is empty.
    @return bool
    '''
    def empty(self):
        return self.__queue.empty()
    
    '''
    Record a failed official account.
    @param tuple wx_data: official-account info
    @return bool
    '''
    def put_fail_wx(self, wx_data):
        self.__fail_list.append(wx_data)
    
    '''
    Print the failure list; failed accounts are put back on the queue.
    '''
    def print_fail_list(self, flush=None):
        if len(self.__fail_list) > 0 :
            for fail in self.__fail_list:
                self.put(fail)
                print 'the fail wx : {0}'.format(fail)
            if not flush:
                self.__fail_list = Manager().list()
        elif flush:
            print 'all success'
            
    # check whether anything failed
    def is_have_failed(self):
        # failed accounts are put back on the queue, so a non-empty queue
        # means there are still failures to retry
        return not self.empty()
Developer: yankaics, Project: get_wx_article, Lines: 81, Source: queue_server.py


Note: the multiprocessing.Manager.empty examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from community-contributed open-source projects; copyright remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.