当前位置: 首页>>代码示例>>Python>>正文


Python BaseManager.shutdown方法代码示例

本文整理汇总了Python中multiprocessing.managers.BaseManager.shutdown方法的典型用法代码示例。如果您正苦于以下问题:Python BaseManager.shutdown方法的具体用法?Python BaseManager.shutdown怎么用?Python BaseManager.shutdown使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在multiprocessing.managers.BaseManager的用法示例。


在下文中一共展示了BaseManager.shutdown方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: start

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
    def start(self):
        """Run the master: publish the job queues on the network, dispatch
        jobs in batches of 10, and collect results until interrupted.

        NOTE(review): the original passed ``authkey='jobs'`` (str), which
        Python 3's BaseManager rejects (bytes required), and called
        ``finished_jobs.get(60)``, which passes 60 as the *block* flag
        rather than a timeout. Both are fixed below.
        """
        # Publish the dispatch/finished queues so remote workers can
        # obtain proxies to them.
        BaseManager.register(
            'get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
        BaseManager.register(
            'get_finished_job_queue', callable=self.get_finished_job_queue)

        # Listen on all interfaces, port 8888, and start the manager server.
        manager = BaseManager(address=('0.0.0.0', 8888), authkey=b'jobs')
        manager.start()

        # Local proxies for the registered queues.
        dispatched_jobs = manager.get_dispatched_job_queue()
        finished_jobs = manager.get_finished_job_queue()

        # Dispatch 10 jobs at a time; wait for the batch to drain before
        # dispatching the next one.  try/finally makes the shutdown
        # reachable -- in the original it sat after an infinite loop and
        # could never run.
        job_id = 0
        try:
            while True:
                for _ in range(10):
                    job_id += 1
                    job = Job(job_id)
                    print('Dispatch job: %s' % job.job_id)
                    dispatched_jobs.put(job)

                while not dispatched_jobs.empty():
                    job = finished_jobs.get(timeout=60)
                    print('Finished Job: %s' % job.job_id)
        finally:
            manager.shutdown()
开发者ID:AllenThePythonic,项目名称:DEV,代码行数:32,代码来源:master.py

示例2: start

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
    def start(self):
        """Dispatch ``self.rnd`` rounds of ``self.batch`` jobs to remote
        workers and collect every result, then shut the manager down.

        A ``None`` sentinel is pushed after the last round so workers know
        there is no more work.
        """
        # Expose the two queues over the network for worker processes.
        BaseManager.register('get_dispatch_queue',
                             callable=self.get_dispatch_queue)
        BaseManager.register('get_finished_queue',
                             callable=self.get_finished_queue)

        manager = BaseManager(address=('0.0.0.0', 8000),
                              authkey=b'dispatcher')
        manager.start()

        dispatch_queue = manager.get_dispatch_queue()
        finished_queue = manager.get_finished_queue()

        job_id = 0
        unfinished = self.rnd * self.batch
        for _ in range(self.rnd):
            for _ in range(self.batch):
                job = Job(job_id=job_id)
                job_id += 1
                print('Dispatch Job {}'.format(job))
                dispatch_queue.put(job)

            # Drain results for this round.  NOTE(review): the original
            # called ``finished_queue.get(60)``, which passes 60 as the
            # *block* argument rather than a timeout; ``timeout=60`` is
            # what was meant.
            while not dispatch_queue.empty():
                finished_job = finished_queue.get(timeout=60)
                unfinished -= 1
                print('Job finished {}'.format(finished_job))

        # Sentinel: tell workers to stop.
        dispatch_queue.put(None)
        # Wait for any results still in flight.
        while unfinished > 0:
            finished_job = finished_queue.get(timeout=60)
            unfinished -= 1
            print('Job finished {}'.format(finished_job))
        manager.shutdown()
开发者ID:Time1ess,项目名称:MyCodes,代码行数:34,代码来源:job_dispatch.py

示例3: start

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
    def start(self, skip=0):
        """Register the job queues on the network, dispatch ETL tasks to
        remote workers and collect results, repeating until cancelled.

        skip: jobs with id < skip are not dispatched (resume support).
        """
        # Publish the dispatch/finished queues so workers can fetch proxies.
        BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
        BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)

        # Listen and start the manager server (rpc_port/authkey are
        # module-level configuration -- presumably defined near the top of
        # this module).
        manager = BaseManager(address=('0.0.0.0', rpc_port), authkey=authkey)
        manager.start()

        # Local proxies for the registered queues.
        dispatched_jobs = manager.get_dispatched_job_queue()
        finished_jobs = manager.get_finished_job_queue()

        job_id = 0
        module = self.project.modules[self.jobname]

        # Round-trip the project through JSON so the dispatched copy is a
        # plain, picklable structure.
        proj = json.loads(json.dumps(
            etl.convert_dict(self.project, self.project.__defaultdict__),
            ensure_ascii=False))
        while True:
            for task in etl.parallel_map(module):
                job_id += 1
                if job_id < skip:
                    continue
                job = ETLJob(proj, self.jobname, task, job_id)
                print('Dispatch job: %s' % job.id)
                dispatched_jobs.put(job)

            # NOTE(review): the original used ``finished_jobs.get(60)``,
            # which passes 60 as the *block* flag; ``timeout=60`` is meant.
            while not dispatched_jobs.empty():
                job = finished_jobs.get(timeout=60)
                print('Finished Job: %s, Count: %s' % (job.id, job.count))

            key = input('press any key to repeat,c to cancel')
            if key == 'c':
                manager.shutdown()
                break
开发者ID:Veterun,项目名称:etlpy,代码行数:36,代码来源:distributed.py

示例4: QueueServer

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
class QueueServer(object):
    """Stand-alone queue server built on multiprocessing.managers.BaseManager.

    Hosts two shared queues -- one feeding image data to a database
    writer, one feeding a thumbnail generator -- and lets remote clients
    obtain network proxies to them.
    """

    def __init__(self, queue_server_host, queue_server_port, queue_server_authkey=None):
        """
        host/port define where the queue server listens; *authkey* is used
        to authenticate all communication between this server and its
        clients -- clients must present the same authkey to connect.
        """
        self.host = queue_server_host
        self.port = queue_server_port
        self.authkey = queue_server_authkey

        # The two work queues shared with clients.
        database_queue = Queue()
        thumbnail_queue = Queue()

        # Register each queue with BaseManager behind a callable so
        # clients can request a proxy object.  To a client the proxy
        # behaves like a regular queue; BaseManager transparently handles
        # the locking, shared-memory handling, and data transfer.
        BaseManager.register('database_queue', callable=lambda: database_queue)
        BaseManager.register('thumbnail_queue', callable=lambda: thumbnail_queue)

        # The manager spawns a server subprocess (on start()) that handles
        # all communication and data synchronization.
        self.manager = BaseManager(address=(self.host, self.port),
                                   authkey=self.authkey)

    def start(self):
        # NOTE(review): changed the Python-2-only ``print 'x'`` statement
        # to the function form used by every other example in this file;
        # for a single argument it is valid on both Python 2 and 3.
        print('Starting Server Process...')
        self.manager.start()

    def stop(self):
        self.manager.shutdown()
开发者ID:eulhaque,项目名称:avoid-picture-duplication---reclaim-your-space,代码行数:47,代码来源:server.py

示例5: start

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
    def start(self):
        """Register the job queues on the network, then dispatch jobs in
        batches of 10 and collect the results until interrupted.
        """
        BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
        BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)

        # NOTE(review): authkey must be bytes on Python 3 (was 'jobs').
        manager = BaseManager(address=('0.0.0.0', 8888), authkey=b'jobs')
        manager.start()

        dispatched_jobs = manager.get_dispatched_job_queue()
        finished_jobs = manager.get_finished_job_queue()

        # Dispatch 10 jobs per batch and wait for the batch to drain.
        # try/finally makes the shutdown reachable -- the original placed
        # it after an infinite loop, so it never ran.
        job_id = 0
        try:
            while True:
                for _ in range(10):
                    job_id += 1
                    job = Job(job_id)
                    print('Dispatch job: %s' % job.job_id)
                    dispatched_jobs.put(job)

                while not dispatched_jobs.empty():
                    # timeout=60: the original ``get(60)`` passed 60 as
                    # the *block* flag, not a timeout.
                    job = finished_jobs.get(timeout=60)
                    print('Finished Job: %s' % job.job_id)
        finally:
            manager.shutdown()
开发者ID:kongxx,项目名称:garbagecan,代码行数:25,代码来源:master.py

示例6: not

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
		# A launcher manager is already reachable: this branch belongs to
		# an ``if`` outside the quoted excerpt -- forward the UI request
		# to the existing process instead of starting a new one.
		manager.connect()

		launcher = manager.getUILauncher()
		launcher.lauchUI(args.uiname, pathUi)
	else:
		#lockLaunch()
		# First instance: initialize the application context or abort.
		context = tvdcore.TVDContext()
		if not(context.isInitialized()) and not(context.initialize()):
			logger.error("Impossible d'initialiser le context")
			sys.exit(1)
		
		# Make the UI launcher available to other processes
		# (original French comment: "Mise a disposition du laucher").
		launcher = UILauncher()
		launcher.lauchUI(args.uiname, pathUi)
		
		BaseManager.register('getUILauncher', callable=lambda:launcher)
		manager = BaseManager(LAUNCHER_ADDRESS, LAUNCHER_AUTHKEY)
		
		manager.start()
		
		pluginMan = context.pluginManager
		
		import time
		# Poll every 100 ms while any UI is still active, then shut the
		# manager down and tear the context down in order.
		while launcher.hasActiveUI():
			time.sleep(0.1)
		manager.shutdown()
		pluginMan.fermeture()
		context.historique.sauverHistorique()
		context.release()
		#unlockLaunch()
开发者ID:oogl-import,项目名称:tvdownloader,代码行数:32,代码来源:tvdownloader.py

示例7: manager

# 需要导入模块: from multiprocessing.managers import BaseManager [as 别名]
# 或者: from multiprocessing.managers.BaseManager import shutdown [as 别名]
def manager():
    """Master process for a Douban user crawler (Python 2 code).

    Restores a pickled HTTP session (or logs in afresh), serves a task
    queue and a result queue over BaseManager, then walks the user graph:
    pop an id from ``todo``, hand it to a worker, harvest newly discovered
    ids, and persist progress after every user so a crash can resume.
    """
    # load 'session.txt', or call login() to generate it
    try:
        with open('session.txt', 'rb') as f:
            headers = cPickle.load(f)
            cookies = cPickle.load(f)
    except:
        # No usable saved session -- perform a fresh login.
        # NOTE(review): bare except hides real errors (corrupt pickle,
        # permissions); catching (IOError, cPickle.PickleError) is safer.
        print '[-] 无session.txt文件, 调用login()...'
        session = DoubanLogin().login()
        headers = session.headers
        cookies = session.cookies

    # create task queue and result queue (shared with workers via proxies)
    task_queue = Queue.Queue()
    result_queue = Queue.Queue()

    # register both queues with BaseManager so workers can request proxies
    BaseManager.register('get_task_queue', callable=lambda: task_queue)
    BaseManager.register('get_result_queue', callable=lambda: result_queue)

    # bound port 5000, set authkey
    # NOTE(review): the comment above says port 5000 but the code binds
    # module-level PORT; the str authkey only works on Python 2.
    manager = BaseManager(address=('', PORT), authkey='douban')
    manager.start()
    task = manager.get_task_queue()
    result = manager.get_result_queue()

    # load task file (persisted sets of done/todo user ids)
    done = task2file.load('done.txt')
    todo = task2file.load('todo.txt')

    # initial task(if no task file): seed with INIT_ID unless already done
    new = set([INIT_ID])
    todo = (todo | (new - (new & done)))
    count = 1

    try:
        while len(todo) != 0:
            try:
                # select an id_ then send it to worker's task queue
                id_ = todo.pop()
                task.put(id_)
                print '\n[+] ========  No.%d  ID: %s  ========' % (count, id_)
                print '[~] Send to task queue...'
                time.sleep(DELAY_TIME)
                # Crawl this user's page for ids of related users.
                new = get_user_id.get_id(id_, headers=headers, cookies=cookies)

                # set() operation, add new IDs to todo
                add = (new - (new & done))
                todo = (todo | add)
                print '[+] 新发现用户ID: %d 个' % len(add)
                print '[~] Receiving User Information...'
                # Blocks until a worker posts this user's scraped data.
                data = result.get()

                # save user information to 'info.txt'
                get_user_info.write2file('info.txt', data)
                print '[+] 已将用户信息保存至: info.txt'
                # add id_ to done
                done.add(id_)
                count += 1

                # to avoid task set expanding too fast, write them to file in time
                task2file.save('todo.txt', todo)
                task2file.save('done.txt', done)

            except Exception, e:
                # NOTE(review): Python-2-only syntax; printing ``e`` and
                # exiting discards the traceback.
                print e
                exit()

    finally:
        manager.shutdown()
        print '\n[+] Manager exit.'
        exit()


注:本文中的multiprocessing.managers.BaseManager.shutdown方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。