

Python SyncManager.dict Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.managers.SyncManager.dict, gathered from open-source projects. If you are unsure what SyncManager.dict does or how to use it, the curated examples below should help; you can also look at other usage examples for multiprocessing.managers.SyncManager.


Eight code examples of the SyncManager.dict method are shown below, ordered by popularity.
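Before the project excerpts, here is a minimal, self-contained sketch of the method itself (illustrative only, not taken from any of the projects below): SyncManager.dict() returns a DictProxy whose backing dict lives in the manager's server process, so an update made in one process is visible to every process holding the proxy.

from multiprocessing import Process
from multiprocessing.managers import SyncManager

def worker(shared):
    # the write is forwarded through the proxy to the manager process
    shared['answer'] = 42

if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    shared = manager.dict()

    p = Process(target=worker, args=(shared,))
    p.start()
    p.join()

    print(shared['answer'])  # 42, even though it was set in a child process
    manager.shutdown()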

Example 1: __init__

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
    def __init__(self, servo_id):
        self.servo_id = servo_id
        self.angle = Value('f', 0.0)
        self.stop_signal = Value('b', False)

        # http://jtushman.github.io/blog/2014/01/14/python-|-multiprocessing-and-interrupts/
        manager = SyncManager() # instead of regular Manager because we want to ignore kb interrupt
        manager.start(Servo.init_mgr) # start the manager explicitly
        self.command_queue = manager.list([])
        self.current_command = manager.dict()

        self.finished = Value('b', False)
Developer: mlensment, Project: rebot, Lines: 14, Source file: servo.py
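The excerpt above never shows Servo.init_mgr. Based on the blog post linked in the comment (and on Example 5 below, which passes an equivalent lambda), a plausible reconstruction is an initializer that makes the manager's server process ignore SIGINT; this is an assumption about the project's code, not the code itself:

import signal

def init_mgr():
    # hypothetical reconstruction: runs inside the manager's server process,
    # so Ctrl-C in the terminal will not kill the manager before the parent
    # has a chance to clean up
    signal.signal(signal.SIGINT, signal.SIG_IGN)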

Example 2: Downloader

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
class Downloader(object):
    def __init__(self, timeout=30, retries=100, wait=1):
        self.timeout = timeout
        self.retries = retries
        self.wait = wait
        
        self.manager = SyncManager()
        self.manager.start()
        
    def retry_fetch_data(self, url):
        market_data = self.fetch_data(url)
        
        retries = 1
        while not market_data and retries < self.retries:
            print "Retry #%s..." % str(retries)
            market_data = self.fetch_data(url)
            if market_data:
                print "Fetched: " + str(len(market_data))
            else:
                print "Fetched nothing!"
            retries += 1
        
        return market_data
    
    def fetch_data(self, url):
        limit = 60
        msg = "Downloading " + url[0: min(limit, len(url))] 
        if len(url) > limit:
            msg += "(+" + str(len(url) - limit) + ")"
        print(msg)
            
        return_dict = self.manager.dict()
        self.job = Process(target=get_page_data, args=(url, return_dict))
        self.job.start()
        
        self.job.join(self.timeout)
        if self.job.is_alive():
            self.job.terminate()
        self.job = None
        
        market_data = None
        if 'page' in return_dict:
            market_data = return_dict['page']
        
        if self.wait > 0:
            time.sleep(self.wait)
        
        return market_data
Developer: supremefist, Project: KinectBats, Lines: 50, Source file: downloader.py
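get_page_data is defined elsewhere in downloader.py. From the way fetch_data consumes return_dict, it must download the URL and store the result under the 'page' key; a hypothetical stand-in (the real project may fetch quite differently) could look like this:

from urllib.request import urlopen

def get_page_data(url, return_dict):
    # hypothetical stand-in: the child writes into the shared manager dict,
    # so the parent can still read the result after join() or terminate()
    try:
        return_dict['page'] = urlopen(url).read()
    except Exception:
        pass  # leave 'page' unset so the caller treats it as a failed fetch

A manager dict is used here because the worker runs in a separate address space: assigning into an ordinary dict inside the child would be invisible to the parent.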

Example 3: perform_analysis

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
def perform_analysis(args, skip_handler, context, actions, metadata):
    """
    Perform static analysis via the given (or if not, all) analyzers,
    in the given analysis context for the supplied build actions.
    Additionally, insert statistical information into the metadata dict.
    """

    analyzers = args.analyzers if 'analyzers' in args \
        else analyzer_types.supported_analyzers
    analyzers, _ = analyzer_types.check_supported_analyzers(
        analyzers, context)

    ctu_collect = False
    ctu_analyze = False
    ctu_dir = ''
    if 'ctu_phases' in args:
        ctu_dir = os.path.join(args.output_path, 'ctu-dir')
        args.ctu_dir = ctu_dir
        if ClangSA.ANALYZER_NAME not in analyzers:
            LOG.error("CTU can only be used with the clang static analyzer.")
            return
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]

    if 'stats_enabled' in args and args.stats_enabled:
        if ClangSA.ANALYZER_NAME not in analyzers:
            LOG.debug("Statistics can only be used with "
                      "the Clang Static Analyzer.")
            return

    actions = prepare_actions(actions, analyzers)
    config_map = analyzer_types.build_config_handlers(args, context, analyzers)

    if 'stats_enabled' in args:
        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            SpecialReturnValueCollector.checker_analyze)

        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            ReturnValueCollector.checker_analyze)

    # Save some metadata information.
    versions = __get_analyzer_version(context, config_map)
    metadata['versions'].update(versions)

    metadata['checkers'] = {}
    for analyzer in analyzers:
        metadata['checkers'][analyzer] = {}

        for check, data in config_map[analyzer].checks().items():
            enabled, _ = data
            metadata['checkers'][analyzer].update({check: enabled})

    if ctu_collect:
        shutil.rmtree(ctu_dir, ignore_errors=True)
    elif ctu_analyze and not os.path.exists(ctu_dir):
        LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
        return

    start_time = time.time()

    # Use Manager to create data objects which can be
    # safely shared between processes.
    manager = SyncManager()
    manager.start(__mgr_init)

    config_map = manager.dict(config_map)
    actions_map = create_actions_map(actions, manager)

    # Setting to not None value will enable statistical analysis features.
    statistics_data = __get_statistics_data(args, manager)

    if ctu_collect or statistics_data:
        ctu_data = None
        if ctu_collect or ctu_analyze:
            ctu_data = manager.dict({'ctu_dir': ctu_dir,
                                     'ctu_func_map_file': 'externalFnMap.txt',
                                     'ctu_temp_fnmap_folder':
                                     'tmpExternalFnMaps'})

        pre_analyze = [a for a in actions
                       if a.analyzer_type == ClangSA.ANALYZER_NAME]
        pre_analysis_manager.run_pre_analysis(pre_analyze,
                                              context,
                                              config_map,
                                              args.jobs,
                                              skip_handler,
                                              ctu_data,
                                              statistics_data,
                                              manager)

    if 'stats_output' in args and args.stats_output:
        return

    if 'stats_dir' in args and args.stats_dir:
        statistics_data = manager.dict({'stats_out_dir': args.stats_dir})

    ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
        args.ctu_reanalyze_on_failure

    if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
#......... part of the code omitted .........
Developer: Ericsson, Project: codechecker, Lines: 103, Source file: analyzer.py
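One detail worth isolating from this example: manager.dict(config_map) seeds the proxy with a copy of an existing mapping, unlike the zero-argument form used elsewhere. A minimal sketch of that behavior:

from multiprocessing.managers import SyncManager

if __name__ == '__main__':
    manager = SyncManager()
    manager.start()

    local = {'a': 1}
    shared = manager.dict(local)  # copies 'local' into the manager process

    local['b'] = 2                # later changes to the original dict...
    print('b' in shared)          # ...do not appear in the proxy: False
    manager.shutdown()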

Example 4: StorageManager

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
class StorageManager(object):
    '''
    Manager for per sensor persistent storage.

    Uses `multiprocessing.managers.SyncManager` to give sensors access
    to a dict-like structure, which automagically synchronizes with
    the main process.

    Values are stored in sqlite as stringified JSON documents.
    '''
    # TODO #1505: check if storage manager is picklable
    def __init__(self, sqlite_factory):
        '''
        Initializes sync manager and logger.
        '''
        self.log = logger(self.__class__.__name__)
        self.storages = {}
        self.manager = SyncManager()

        def ignore_signals():
            '''
            Ignores SIGINT and SIGTERM.
            We don't want them propagated to SyncManager, because
            we want to store its state to disk on Agent shutdown.
            '''
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

        self.manager.start(ignore_signals)

        self.ppid = None

        self.sqliteconn = sqlite_factory()

    def get_storage(self, name):
        '''
        Retrieves storage for given name. If such storage doesn't exist,
        a new one, possibly with data got from sqlite, will be created.

        Note that name is not necessarily sensor's name. In fact,
        most of the time it will be sensor_name+hash(sensor_config).
        This way we can differentiate storages within one sensor type.
        '''
        self.log.debug('Storage requested for `{}`'.format(name))

        if name in self.storages:
            return self.storages[name]

        cursor = self.sqliteconn.cursor()
        cursor.execute("SELECT value FROM sensorstorage WHERE key=?", (name,))
        try:
            # fetchone() returns a row tuple; the JSON document is its first column
            storage_data = json.loads(cursor.fetchone()[0])
        # Catching too general exception
        # pylint: disable=W0703
        except Exception:
            storage_data = {}
        # Instance of 'SyncManager' has no 'dict' member
        # pylint: disable=E1101
        self.storages[name] = self.manager.dict(storage_data)
        return self.storages[name]

    def shutdown(self):
        '''
        Flushes all remaining storages into sqlite
        and shuts down manager.
        '''
        self.log.debug('Storage manager: Started shutdown')

        cursor = self.sqliteconn.cursor()
        for sensor, store in self.storages.items():
            cursor.execute(
                'INSERT OR REPLACE INTO sensorstorage'
                ' (key, value) VALUES (?,?)',
                (sensor, json.dumps(dict(store))),
            )
        self.sqliteconn.commit()
        self.sqliteconn.close()
        self.manager.shutdown()

        self.log.debug('Storage manager: Finished shutdown')
Developer: whitehats, Project: monitowl-agent, Lines: 82, Source file: agent.py
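A hedged usage sketch follows: the sqlite_factory argument and the sensorstorage schema are inferred from the SQL in get_storage() and shutdown(), and StorageManager (with its logger helper) is assumed to be importable from the project.

import sqlite3

def sqlite_factory():
    # inferred schema: one stringified-JSON document per storage key
    conn = sqlite3.connect('storage.db')
    conn.execute('CREATE TABLE IF NOT EXISTS sensorstorage '
                 '(key TEXT PRIMARY KEY, value TEXT)')
    return conn

mgr = StorageManager(sqlite_factory)
store = mgr.get_storage('cpu_sensor')  # a manager-backed DictProxy
store['last_reading'] = 0.42           # usable from any process holding the proxy
mgr.shutdown()                         # flushes every storage back to sqlite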

Example 5: run

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]

#......... part of the code omitted .........

        step = 4
        fwrite(fstep, '4')

    if step == 4:
        # Perform MAFFT (multiple sequence alignment)
        logger.info('Performing MAFFT.')
        if exists(dclu_aa):
            rmtree(dclu_aa)
        makedirs(dclu_aa)

        task[:] = []
        for i in (x for x in listdir(dfa_aa) if isfile(join(dfa_aa, x))):
            seg = i.split('.')
            namelength = int(seg[-2])
            task.append((join(dfa_aa, i), join(dclu_aa, '{0}.clu'.format('.'.join(seg[:-1]))), namelength))
        Pool(args.nthread).map_async(mafft_wrapper, task).get(pool_timeout)

        step = 5
        fwrite(fstep, '5')

    if step == 5:
        # Convert aa clustal files to nt clustal files
        logger.info('Converting protein clustal files to nucleotide clustal files.')
        if exists(dclu_nt):
            rmtree(dclu_nt)
        makedirs(dclu_nt)

        if 'qseq' not in locals():
            sid_sets = []
            idmap_qs = {}
            sid_qidset = {}
            for i in (join(dbesthit, x) for x in listdir(dbesthit) if isfile(join(dbesthit, x))):
                sid_set = set()
                for qid, sid, qf, sf in parse_blastext_frame(i):
                    sid_set.add(sid)
                    idmap_qs.update({qid: sid})
                    if sid in sid_qidset:
                        sid_qidset.get(sid).add(qid)
                    else:
                        sid_qidset.update({sid: {qid}})
                sid_sets.append(sid_set)

            common_sid_set = reduce(and_, sid_sets)
            qseq = {}
            for i in args.ss:
                for header, sequence in parse_fasta(i):
                    if idmap_qs.get(header) in common_sid_set:
                        qseq.update({header: sequence})
            for i in args.rs:
                for header, sequence in parse_fasta(i):
                    if idmap_qs.get(header) in common_sid_set:
                        qseq.update({header: sequence})
            if apply_rc:
                for i in args.rc:
                    for header, sequence in parse_fasta(i):
                        if idmap_qs.get(header) in common_sid_set:
                            qseq.update({header: sequence})

        task[:] = []
        for i in (x for x in listdir(dclu_aa) if isfile(join(dclu_aa, x))):
            task.append((join(dclu_aa, i), join(dclu_nt, i), qseq, 60))
        Pool(args.nthread).map_async(aa_to_nt_wrapper, task).get(pool_timeout)

        step = 6
        fwrite(fstep, '6')

    if step == 6:
        # Parse clustal files and report mutation profiles
        logger.info('Beginning to parse clustal files and report mutation profiles.')
        manager = SyncManager()
        manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))

        profile_aa = manager.dict()
        mutationparser = MutationParser(
            blocklen=args.blocklen,
            perfect_match_percent=args.perfect_match_percent,
            sidelen=args.sidelen,
            side_match_percent=args.side_match_percent)

        task[:] = []
        for i in (join(dclu_aa, x) for x in listdir(dclu_aa) if isfile(join(dclu_aa, x))):
            task.append((i, mutationparser, 'aa', profile_aa))
        Pool(args.nthread).map_async(parse_clustal_wrapper, task).get(pool_timeout)

        write_profile(fprofile_aa, 'aa', profile_aa)

        profile_nt = manager.dict()

        task[:] = []
        for i in (join(dclu_nt, x) for x in listdir(dclu_nt) if isfile(join(dclu_nt, x))):
            task.append((i, mutationparser, 'nt', profile_nt))
        Pool(args.nthread).map_async(parse_clustal_wrapper, task).get(pool_timeout)

        write_profile(fprofile_nt, 'nt', profile_nt)

        step = 7
        fwrite(fstep, '7')

    logger.info('Mutation discovery finished.')
Developer: jlhg, Project: pmcall, Lines: 104, Source file: pmcall.py
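The pattern to note here is that profile_aa and profile_nt are manager dicts packed into each task tuple: proxies pickle cleanly, so every Pool worker mutates the same server-side dict. A reduced, self-contained sketch of the same idea (the names are invented for illustration):

from multiprocessing import Pool
from multiprocessing.managers import SyncManager

def square(args):
    n, results = args  # results is a DictProxy, reconnected inside the worker
    results[n] = n * n

if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    results = manager.dict()

    with Pool(2) as pool:
        pool.map(square, [(n, results) for n in range(5)])

    print(dict(results))  # {0: 0, 1: 1, 2: 4, 3: 9, 4: 16}
    manager.shutdown()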

Example 6: f

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
#!/usr/bin/env python

from multiprocessing.managers import SyncManager

def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()

if __name__ == '__main__':
    manager = SyncManager(address=('127.0.0.1', 0), authkey=b'abc')  # authkey must be bytes
    manager.start()

    d = manager.dict()
    l = manager.list(range(10))

    # port 0 lets the OS pick a free port; print the one actually bound
    print(manager.address)
    manager.join()  # block while the manager process keeps serving requests

Developer: benthomasson, Project: mp, Lines: 23, Source file: remote_manager.py
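As written, a remote client that connects to the printed address cannot reach the server's d and l: a stock SyncManager only lets clients create new shared objects. A hedged client-side sketch (the port is a placeholder for whatever the server printed):

from multiprocessing.managers import SyncManager

if __name__ == '__main__':
    client = SyncManager(address=('127.0.0.1', 50000), authkey=b'abc')
    client.connect()        # attach to the already-running server

    shared = client.dict()  # creates a new dict inside the *server* process
    shared['hello'] = 'world'

To hand an existing object such as d to clients, the usual pattern from the standard-library docs is a BaseManager subclass with a registered accessor (e.g. register('get_dict', callable=lambda: d)) on the server side, with the same typeid registered on the client before connecting.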

Example 7: SpiderEngine

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
class SpiderEngine(object):
	def __init__(self, cookie_file, url_queue_size, pg_queue_size, nr_downloadprocess, nr_parserprocess):
		super(SpiderEngine, self).__init__()

		self.logger = logging.getLogger(self.__class__.__name__)

		self.multiprocess_manager = SyncManager()#SyncManager(('',58585))
		self.multiprocess_manager.start()

		self.lck4urlq=self.multiprocess_manager.Lock()
		self.lck4pageq=self.multiprocess_manager.Lock()
		# event for suprocess to initiative exit.
		self.shutdown=self.multiprocess_manager.Event()

		self.url_queue=Queue(url_queue_size)
		self.page_queue=Queue(pg_queue_size)
		self.url_hist=self.multiprocess_manager.dict()
		self.urls= UrlScheduler(self.url_queue, self.url_hist, self.lck4urlq)

		# init multiprocess log
		self.mlog=get_logger()
		mhandler=logging.StreamHandler()
		mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s() | %(message)s', '%H:%M:%S'))
		self.mlog.addHandler(mhandler)
		self.mlog.setLevel(logging.INFO)

		self.pages= PageScheduler(self.urls, self.page_queue, self.lck4pageq)
		self.downloader= PageDownloader(cookie_file, self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_downloadprocess, self.mlog)
		self.parser=PageParser(self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_parserprocess, self.mlog)


	def setSignalHandler(self):
		global EXITEVENT
		EXITEVENT=self.shutdown
		if sys.platform=='linux2':
			# register the signal handler
##			signal.signal(signal.SIGUSR1,signalHandler)
			signal.signal(signal.SIGTERM,signalHandler)


	def doWork(self, initurl, allowHost, blockHost, allowPattern, skipPattern, nr_limit):
		info, debug=self.logger.info, self.logger.debug
		self.parser.addHost(allowHost, blockHost)
		self.parser.addPattern(allowPattern, skipPattern)
		self.downloader.createProcesses()
		self.parser.createProcesses()
		info('wait 2 secs ...')
		time.sleep(2)

		self.setSignalHandler()
##		debug('init url: %s',initurl)
		self.urls.addUrl('test', initurl)
##		debug('init url add.')
		old= 0 # self.downloader.nms.mcnt
		try:
			while True:
				if self.shutdown.wait(2):
					info('shutdown event got.')
					break

				if self.downloader.nms.mcnt != old:
					old=self.downloader.nms.mcnt
					info('\n%s\n\tdownloader mcnt: %d\n%s', '-*'*30, old, '-*'*30)
					if old>nr_limit:
						info('exceed nr_limit %d>%d, break', self.downloader.nms.mcnt, nr_limit)
						break

		except KeyboardInterrupt:
			info('got KeyboardInterrupt')
		finally:
			debug('\n%s', '~'*30)
			self.downloader.getStat()
			self.parser.getStat()
			debug('\n%s', '~'*30)
			self.exit()
			debug('\n%s', '*~'*30)
##		time.sleep(1)


	def exit(self):
		info, debug=self.logger.info, self.logger.debug
		self.shutdown.set()
		try:
			while True:
				self.page_queue.get_nowait()
		except Empty:
			pass

		try:
			while True:
				self.url_queue.get_nowait()
		except Empty:
			pass

##		info('page_queue is Empty: %s, %d', self.page_queue.empty(), self.page_queue.qsize())
##		info('url_queue is Empty: %s, %d', self.url_queue.empty(), self.url_queue.qsize())
		self.parser.closeProcesses()

		try:
			while True:
#......... part of the code omitted .........
Developer: liveonnet, Project: postgetter-app, Lines: 103, Source file: testSpider.py
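url_hist above is a manager dict serving as a cross-process "already seen" set, paired with a manager Lock (lck4urlq). The pairing matters: each individual proxy operation is serialized by the manager, but a check-then-insert sequence is not atomic, so two downloader processes could both claim the same URL. A reduced sketch of the dedup step UrlScheduler presumably performs (the helper name is invented):

def add_url_once(url, url_hist, lock):
    # the check and the insert must happen under one lock, or two
    # processes may both observe the URL as unseen and enqueue it twice
    with lock:
        if url in url_hist:
            return False
        url_hist[url] = True
        return True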

Example 8: start_pool

# Required module: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import dict [as alias]
def start_pool(config_file):
    manager = SyncManager()
    manager.start(sync_manager_init)
    status_dict = manager.dict()
    pr_queue = PriorityQueue()
    with open(config_file, 'r') as f:
        options = yaml.safe_load(f)  # safe_load avoids constructing arbitrary objects

    dev_macs = options.get('devices', None)
    if not dev_macs:
        print("No device found in the config file")
        return

    for dev_mac in dev_macs:
        if not is_mac_valid(dev_mac):
            raise ValueError("{} is not a valid MAC address!".format(dev_mac))
        pr_queue.put_nowait((0, dev_mac))
        status_dict[dev_mac] = 0

    battery_warn = options.get('battery_warn', 0) or 20
    log_dir = options.get('log_dir', 0) or './logs/'
    data_dir = options.get('data_dir', 0) or './data/'
    max_process = options.get('max_process', 0) or 3
    raw = options.get('raw', False)
    fname = os.path.join(data_dir, options.get('data_prefix', 'WED_data'))
    min_logs = options.get('min_logs', 1000)

    common_kwargs = {'command': Commands.DOWNLOAD,
                     'backend_lock': backend_lock,
                     'fname': fname,
                     'battery_warn': battery_warn,
                     'raw': raw,
                     'status_dict': status_dict,
                     'min_logs': min_logs,
                     }
    process_list = []
    max_process = min(max_process, len(dev_macs))
    retries = {d: 0 for d in dev_macs}

    def get_next_process():
        mac_address = pr_queue.get_nowait()[1]
        stop_event = Event()
        wake_up = Event()
        log_file = os.path.join(log_dir, "log_%s.log" % mac_address.replace(':', ''))
        kwargs = {'mac_address': mac_address,
                  'log_file': log_file,
                  'wake_up': wake_up,
                  'stop_event': stop_event,
                  }
        kwargs.update(common_kwargs)
        p = Process(target=start_command, kwargs=kwargs)
        return p, mac_address, status_dict[mac_address], wake_up, stop_event

    for i in range(max_process):
        process_list.append(get_next_process())

    for p in process_list:
        p[0].start()
    try:
        while len(process_list) > 0:
            p = process_list.pop(0)
            if p[0].is_alive():
                process_list.append(p)
            else:
                last_checked = status_dict[p[1]]
                if last_checked == p[2]:
                    retries[p[1]] += 1
                    if retries[p[1]] > 3:
                        last_checked = (datetime.now() - datetime(1970, 1, 1)).total_seconds()
                        retries[p[1]] = 0
                else:
                    retries[p[1]] = 0
                pr_queue.put_nowait((last_checked, p[1]))
                new_process = get_next_process()
                new_process[0].start()
                process_list.append(new_process)
            time.sleep(2)

    except (KeyboardInterrupt, SystemExit):
        delay = 4 * len([p for p in process_list if p[0].is_alive()])
        print("\nCancelling downloads..............\nWaiting %d seconds for all devices to clean up....\n" % delay)
        time.sleep(delay)
        for p in process_list:
            p[0].terminate()
        sys.exit(0)
    except:
        raise
Developer: ekipmanager, Project: wavelet-utils, Lines: 89, Source file: wed_tool.py


Note: The multiprocessing.managers.SyncManager.dict examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.