

Python SyncManager.start Method Code Examples

This article collects typical usage examples of the Python method multiprocessing.managers.SyncManager.start, drawn from open-source projects. If you are wondering what SyncManager.start does, how to call it, or want to see it used in context, the curated examples below should help. You can also explore further usage examples of multiprocessing.managers.SyncManager itself.


Fifteen code examples of the SyncManager.start method are shown below, ordered by popularity.
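Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: start a SyncManager with an initializer that ignores SIGINT inside the manager's server process, create shared objects through the manager, and shut it down when done (the initializer name is illustrative):

import signal
from multiprocessing.managers import SyncManager

def mgr_init():
    # Runs inside the manager's server process: ignore Ctrl-C there so
    # shared objects stay usable while the parent process cleans up.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

if __name__ == '__main__':
    manager = SyncManager()
    manager.start(mgr_init)        # start the server process explicitly
    try:
        queue = manager.Queue()    # proxy to a queue living in the manager
        queue.put('hello')
        print(queue.get())
    finally:
        manager.shutdown()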

Example 1: main_proc

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def main_proc():

    pid = os.getpid()
    # initialize manager
    mgr = SyncManager()
    mgr.start(mgr_init)
    
    try:
        # Create share object between processes
        shared_queue = mgr.Queue()

        # Create subprocesses
        put_proc = Process(target=put_data_proc, args=(shared_queue,))
        put_proc_1 = Process(target=put_data_proc_1, args=(shared_queue,))
        get_proc = Process(target=get_data_proc, args=(shared_queue,))

        # Start the processes
        put_proc.start()
        put_proc_1.start()
        get_proc.start()

        # Wait for the processes to finish
        put_proc.join()
        put_proc_1.join()
        get_proc.join()

    except KeyboardInterrupt:
        print "Main process (pid=%s) was interruptted" % pid
    finally:
        mgr.shutdown()
Developer: Tong-Wenjing, Project: practise, Lines: 32, Source: multiprocesses.py
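The helper functions that Example 1 assumes (mgr_init and the three workers) are not part of the excerpt; a plausible reconstruction, with illustrative bodies, could look like this:

def mgr_init():
    # Presumably ignores SIGINT in the manager process, as in Example 8 below.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

def put_data_proc(queue):
    # Producer: push a few items onto the shared queue.
    for i in range(5):
        queue.put('proc: %d' % i)

def put_data_proc_1(queue):
    for i in range(5):
        queue.put('proc_1: %d' % i)

def get_data_proc(queue):
    # Consumer: drain one item per producer put (ten in total here).
    for _ in range(10):
        print(queue.get())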

Example 2: __init__

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
 def __init__(self, args):
     # Init print management
     Print.init(log=args.log, debug=args.debug, all=args.all, cmd=args.prog)
     # Print command-line
     Print.command()
     self._process_color_arg(args)
     # Get project and related configuration
     self.project = args.project
     self.config_dir = args.i
     self.processes = args.max_processes if args.max_processes <= cpu_count() else cpu_count()
     self.use_pool = (self.processes != 1)
     self.lock = Lock()
     self.nbfiles = 0
     self.nbskip = 0
     self.nberrors = 0
     self.file_filter = []
     if args.include_file:
         self.file_filter.extend([(f, True) for f in args.include_file])
     else:
         # Default includes netCDF only
          self.file_filter.append((r'^.*\.nc$', True))
     if args.exclude_file:
         # Default exclude hidden files
         self.file_filter.extend([(f, False) for f in args.exclude_file])
     else:
          self.file_filter.append((r'^\..*$', False))
     self.dir_filter = args.ignore_dir
     # Init process manager
     if self.use_pool:
         manager = SyncManager()
         manager.start()
         Print.BUFFER = manager.Value(c_char_p, '')
         self.progress = manager.Value('i', 0)
     else:
         self.progress = Value('i', 0)
     self.tunits_default = None
      if self.project in DEFAULT_TIME_UNITS:
         self.tunits_default = DEFAULT_TIME_UNITS[self.project]
     # Change frequency increment
     if args.set_inc:
         for table, frequency, increment, units in args.set_inc:
              if table not in {k[0] for k in FREQ_INC}:
                  raise InvalidTable(table)
              if frequency not in {k[1] for k in FREQ_INC}:
                  raise InvalidFrequency(frequency)
             keys = [(table, frequency)]
             if table == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[1] == frequency]
             if frequency == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[0] == table]
             for key in keys:
                 FREQ_INC[key] = [float(increment), str(units)]
     # Get reference time properties if submitted
     # Default is to deduce them from first file scanned
     self.ref_calendar = args.calendar
     self.ref_units = args.units
     # Init collector
     self.sources = None
Developer: Prodiguer, Project: nctime, Lines: 60, Source: context.py

Example 3: __init__

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
 def __init__(self, *args, **kwargs):
     # init ingestor process, create tweet queue
     manager = SyncManager()
     manager.start(mgr_init)
     self.tweet_queue = manager.Queue()
     self.ingestion_process = multiprocessing.Process(target=do_ingestion, args=(self.tweet_queue,))
     self.ingestion_process.start()
     
     # call superclass init
     tweepy.StreamListener.__init__(self, *args, **kwargs)
Developer: pcallier, Project: insight, Lines: 12, Source: get_tweets.py

Example 4: Manager

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def Manager():
    '''
    Returns a manager associated with a running server process

    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Developer: 7modelsan, Project: kbengine, Lines: 13, Source: __init__.py

Example 5: __init__

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
    def __init__(self, servo_id):
        self.servo_id = servo_id
        self.angle = Value('f', 0.0)
        self.stop_signal = Value('b', False)

        # http://jtushman.github.io/blog/2014/01/14/python-|-multiprocessing-and-interrupts/
        manager = SyncManager() # instead of regular Manager because we want to ignore kb interrupt
        manager.start(Servo.init_mgr) # start the manager explicitly
        self.command_queue = manager.list([])
        self.current_command = manager.dict()

        self.finished = Value('b', False)
Developer: mlensment, Project: rebot, Lines: 14, Source: servo.py
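Servo.init_mgr is not shown in this excerpt; going by the comment and the linked blog post, it is presumably a SIGINT-ignoring initializer in the style of mgr_init from Example 8 below.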

Example 6: __init__

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
class DataSender:

    def __init__(self,phantfile):
        try:
            self.phant = json.load(open(phantfile, 'r'))
        except IOError:
            raise ValueError("Invalid phantfile location")
        self.running = True
        self._manager = SyncManager()


    def start(self):
        self._manager.start(self._mgr_init)
        self._que = self._manager.Queue()
        self._process = Process(target=self.up, args=(self._que,))
        self._process.start()

    def _mgr_init(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        print("initialized manager")

    def up(self,que):
        
        def stop(signum, frame):
            print("process SIGINT stopping")
            self.running = False

        signal.signal(signal.SIGINT, stop)
        print('datauploader started')
        while self.running or not que.empty():
            item = json.loads(que.get(True))
            print("handling item={0}".format(item))
            self.httpsend(item)
            que.task_done()
            time.sleep(2)
        print("datauploader process terminating...")

    def send(self, data):
        self._que.put(data)

    def httpsend(self, data):
        # Python 3: urllib.parse / urllib.request replace the old urllib / urllib2 calls
        postdata = urllib.parse.urlencode(data).encode('utf-8')
        headers = {'Phant-Private-Key': self.phant['privateKey']}
        req = urllib.request.Request(self.phant['inputUrl'], postdata, headers)
        res = urllib.request.urlopen(req)
        content = res.read()
        print("response: {0}".format(content))
    
    def stop(self):
        print("shutting down sender")
        self.running = False
        self._que.join()
        self._process.terminate()
Developer: tuokor, Project: animated-octo-lana, Lines: 55, Source: datasender.py
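A hypothetical usage of DataSender, assuming a phant.json file that contains at least the privateKey and inputUrl keys the class reads:

sender = DataSender('phant.json')   # path is illustrative
sender.start()
sender.send('{"temp": 21.5}')       # up() calls json.loads() on each item, so send JSON strings
sender.stop()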

Example 7: get_server_queue

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def get_server_queue():
    #FIXME: some OSX users were getting "Can't assign requested address" errors
    # if we use socket.gethostname() for the address. Changing it to
    # 'localhost' seems to fix the issue, but I don't know why. We had to
    # use socket.gethostname() in order to get our benchmark tests to run
    # using qsub on a linux cluster, so with this 'fix', testflo benchmark tests
    # will likely not work on a cluster of OSX machines.
    if sys.platform == 'darwin':
        addr = 'localhost'
    else:
        addr = socket.gethostname()

    manager = SyncManager(address=(addr, 0), authkey=_testflo_authkey)
    manager.start()
    return manager, manager.Queue()
Developer: kmarsteller, Project: testflo, Lines: 17, Source: qman.py
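The returned queue proxy can be handed directly to worker processes, which reconnect to the manager's server over the socket; a minimal local sketch (worker body illustrative):

from multiprocessing import Process

def worker(q):
    q.put('result from worker')

manager, queue = get_server_queue()
proc = Process(target=worker, args=(queue,))
proc.start()
proc.join()
print(queue.get())
manager.shutdown()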

Example 8: init_good_sync_manager

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def init_good_sync_manager():
    from multiprocessing.managers import SyncManager
    # handle SIGINT from the SyncManager object
    def mgr_sig_handler(signal, frame):
        print('not closing the mgr')

    # initializer for SyncManager
    def mgr_init():
        import signal
        signal.signal(signal.SIGINT, mgr_sig_handler)
        print('initialized manager')

    # using SyncManager directly instead of letting Manager() do it for me
    manager = SyncManager()
    manager.start(mgr_init)
Developer: kennyjoseph, Project: twitter_dm, Lines: 17, Source: multiprocess_setup.py
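The initializer matters because Ctrl-C delivers SIGINT to the whole foreground process group: without it, the manager's server process dies along with the workers, and any later proxy access raises an error. With it, shared state remains reachable while the parent cleans up. A minimal sketch (run_workers is illustrative):

manager = SyncManager()
manager.start(mgr_init)
shared = manager.dict()
try:
    run_workers(shared)               # long-running work that fills `shared`
except KeyboardInterrupt:
    # The manager ignored SIGINT, so the proxy is still usable here.
    print('partial results:', dict(shared))
finally:
    manager.shutdown()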

Example 9: Downloader

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
class Downloader(object):
    def __init__(self, timeout=30, retries=100, wait=1):
        self.timeout = timeout
        self.retries = retries
        self.wait = wait
        
        self.manager = SyncManager()
        self.manager.start()
        
    def retry_fetch_data(self, url):
        market_data = self.fetch_data(url)
        
        retries = 1
        while not market_data and retries < self.retries:
            print("Retry #%s..." % retries)
            market_data = self.fetch_data(url)
            if market_data:
                print("Fetched: " + str(len(market_data)))
            else:
                print("Fetched nothing!")
            retries += 1
        
        return market_data
    
    def fetch_data(self, url):
        limit = 60
        msg = "Downloading " + url[0: min(limit, len(url))] 
        if len(url) > limit:
            msg += "(+" + str(len(url) - limit) + ")"
        print msg
            
        return_dict = self.manager.dict()
        self.job = Process(target=get_page_data, args=(url, return_dict))
        self.job.start()
        
        self.job.join(self.timeout)
        if self.job.is_alive():
            self.job.terminate()
        self.job = None
        
        market_data = None
        if 'page' in return_dict:
            market_data = return_dict['page']
        
        if self.wait > 0:
            time.sleep(self.wait)
        
        return market_data
Developer: supremefist, Project: KinectBats, Lines: 50, Source: downloader.py
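get_page_data is defined elsewhere in the project; a plausible minimal reconstruction (body illustrative):

def get_page_data(url, return_dict):
    # Fetch the page in the worker process and hand it back through the
    # manager-backed dict; fetch_data() looks for the 'page' key.
    import urllib.request
    return_dict['page'] = urllib.request.urlopen(url).read()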

Example 10: Queue

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
class Queue():
    """Class Queue
    """

    __type = None
    __manager = None
    __address = None
    __authkey = None

    def __init__(self, qtype, address, authkey=''):
        """Class constructor

        Called when object is initialized

        Args:
           qtype (int): queue type, server|client
           address (str): queue address
           authkey (str): authentication key

        Raises:
           error: ValueError

        """

        if qtype in (queue.QUEUE_TYPE_SERVER, queue.QUEUE_TYPE_CLIENT):
            self.__type = qtype
        else:
            raise ValueError('Invalid Queue type')

        # Check for AF_INET address format 'host:port'
        if address.find(':') > 0:
            host, port = address.split(':')
            address = (host, int(port))

        self.__address = address
        self.__authkey = authkey

    def create(self):
        """Methods creates queue server

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_SERVER:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = Queue()  # note: this re-instantiates the wrapper class above; the original project presumably creates a plain queue object here
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.start()

    def destroy(self):
        """Methods destroys queue

        Args:
           none

        Returns:
           void

        """

        self.__manager.shutdown()

    def connect(self):
        """Methods connects to queue

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_CLIENT:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = Queue()  # note: same caveat as in create() above
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.connect()
Developer: hydratk, Project: hydratk, Lines: 96, Source: queue.py
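Both create() and connect() register get_queue, but the excerpt never calls it; presumably a client retrieves the shared queue through the registered accessor, roughly like this (address and key illustrative):

from multiprocessing.managers import SyncManager

class QueueClient(SyncManager):
    pass

QueueClient.register('get_queue')

client = QueueClient(address=('127.0.0.1', 61000), authkey=b'secret')
client.connect()
shared = client.get_queue()   # proxy to the queue living in the server process
shared.put('hello')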

Example 11: FooManager

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
FooManager.register('bar', bar)

if __name__ == '__main__':
    
    
    mgr = FooManager()
    mgr.start()
    
    res = mgr.bar()
    print(res._getvalue())
    print(str(res)[0])
    print(type(res))
    
    sys.exit(0)  # note: everything below this call is dead code in the excerpt
    
    qm = SyncManager()
    qm.start()
    
    mgr = FooManager()
    mgr.start()
    results = []
    for _i in range(1):
        q = qm.Queue()
        res = mgr.foo(3, q)
        print(res)
        print(type(res))
        results.append(q.get())
        print('=' * 80)
        sleep(3)
    sleep(5)
    print('Done.')
Developer: allista, Project: DegenPrimer, Lines: 33, Source: mp_test.py
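FooManager, bar and foo are defined outside the excerpt; a plausible reconstruction of the parts the first half needs (body illustrative):

from multiprocessing.managers import SyncManager

def bar():
    # Runs inside the manager's server process; the caller gets back a proxy,
    # hence the res._getvalue() call above.
    return 'bar'

class FooManager(SyncManager):
    pass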

Example 12: perform_analysis

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def perform_analysis(args, skip_handler, context, actions, metadata):
    """
    Perform static analysis via the given (or if not, all) analyzers,
    in the given analysis context for the supplied build actions.
    Additionally, insert statistical information into the metadata dict.
    """

    analyzers = args.analyzers if 'analyzers' in args \
        else analyzer_types.supported_analyzers
    analyzers, _ = analyzer_types.check_supported_analyzers(
        analyzers, context)

    ctu_collect = False
    ctu_analyze = False
    ctu_dir = ''
    if 'ctu_phases' in args:
        ctu_dir = os.path.join(args.output_path, 'ctu-dir')
        args.ctu_dir = ctu_dir
        if ClangSA.ANALYZER_NAME not in analyzers:
            LOG.error("CTU can only be used with the clang static analyzer.")
            return
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]

    if 'stats_enabled' in args and args.stats_enabled:
        if ClangSA.ANALYZER_NAME not in analyzers:
            LOG.debug("Statistics can only be used with "
                      "the Clang Static Analyzer.")
            return

    actions = prepare_actions(actions, analyzers)
    config_map = analyzer_types.build_config_handlers(args, context, analyzers)

    if 'stats_enabled' in args:
        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            SpecialReturnValueCollector.checker_analyze)

        config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
            ReturnValueCollector.checker_analyze)

    # Save some metadata information.
    versions = __get_analyzer_version(context, config_map)
    metadata['versions'].update(versions)

    metadata['checkers'] = {}
    for analyzer in analyzers:
        metadata['checkers'][analyzer] = {}

        for check, data in config_map[analyzer].checks().items():
            enabled, _ = data
            metadata['checkers'][analyzer].update({check: enabled})

    if ctu_collect:
        shutil.rmtree(ctu_dir, ignore_errors=True)
    elif ctu_analyze and not os.path.exists(ctu_dir):
        LOG.error("CTU directory: '%s' does not exist.", ctu_dir)
        return

    start_time = time.time()

    # Use Manager to create data objects which can be
    # safely shared between processes.
    manager = SyncManager()
    manager.start(__mgr_init)

    config_map = manager.dict(config_map)
    actions_map = create_actions_map(actions, manager)

    # Setting to not None value will enable statistical analysis features.
    statistics_data = __get_statistics_data(args, manager)

    if ctu_collect or statistics_data:
        ctu_data = None
        if ctu_collect or ctu_analyze:
            ctu_data = manager.dict({'ctu_dir': ctu_dir,
                                     'ctu_func_map_file': 'externalFnMap.txt',
                                     'ctu_temp_fnmap_folder':
                                     'tmpExternalFnMaps'})

        pre_analyze = [a for a in actions
                       if a.analyzer_type == ClangSA.ANALYZER_NAME]
        pre_analysis_manager.run_pre_analysis(pre_analyze,
                                              context,
                                              config_map,
                                              args.jobs,
                                              skip_handler,
                                              ctu_data,
                                              statistics_data,
                                              manager)

    if 'stats_output' in args and args.stats_output:
        return

    if 'stats_dir' in args and args.stats_dir:
        statistics_data = manager.dict({'stats_out_dir': args.stats_dir})

    ctu_reanalyze_on_failure = 'ctu_reanalyze_on_failure' in args and \
        args.ctu_reanalyze_on_failure

    if ctu_analyze or statistics_data or (not ctu_analyze and not ctu_collect):
#......... remainder of the code omitted .........
Developer: Ericsson, Project: codechecker, Lines: 103, Source: analyzer.py

Example 13: run_batch_predictions

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def run_batch_predictions(base_url, base_headers, user, pwd,
                          api_token, create_api_token,
                          pid, lid, import_id, n_retry, concurrent,
                          resume, n_samples,
                          out_file, keep_cols, delimiter,
                          dataset, pred_name,
                          timeout, ui, fast_mode, auto_sample,
                          dry_run, encoding, skip_dialect,
                          skip_row_id=False,
                          output_delimiter=None,
                          max_batch_size=None, compression=None,
                          field_size_limit=None,
                          verify_ssl=True):

    if field_size_limit is not None:
        csv.field_size_limit(field_size_limit)

    if max_batch_size is None:
        max_batch_size = MAX_BATCH_SIZE

    multiprocessing.freeze_support()
    t1 = time()
    queue_size = concurrent * 2
    #  provide version info and system info in user-agent
    base_headers['User-Agent'] = 'datarobot_batch_scoring/{}|' \
                                 'Python/{}|{}|system/{}|concurrency/{}' \
                                 ''.format(__version__,
                                           sys.version.split(' ')[0],
                                           requests.utils.default_user_agent(),
                                           platform.system(),
                                           concurrent)

    with ExitStack() as stack:
        if my_os_cannot_handle_life_in_the_fast_lane():
            #  Windows requires an additional manager process. The locks
            #  and queues it creates are proxies for objects that exist within
            #  the manager itself. It does not perform as well so we only
            #  use it when necessary.

            manager = SyncManager()
            manager.start(initializer=manager_init)

            conc_manager = stack.enter_context(manager)
        else:
            #  You're on a nix of some sort and don't need a manager process.
            conc_manager = multiprocessing
        network_queue = conc_manager.Queue(queue_size)
        network_deque = conc_manager.Queue(queue_size)
        writer_queue = conc_manager.Queue(queue_size)
        progress_queue = conc_manager.Queue()

        shovel_status = conc_manager.Value('c', b'-', lock=False)
        network_status = conc_manager.Value('c', b'-', lock=False)
        writer_status = conc_manager.Value('c', b'-', lock=False)
        abort_flag = conc_manager.Value('b', 0, lock=False)

        base_headers['content-type'] = 'text/csv; charset=utf8'
        if compression:
            base_headers['Content-Encoding'] = 'gzip'
        if import_id:
            endpoint = base_url + '/'.join((import_id, 'predict'))
        else:
            endpoint = base_url + '/'.join((pid, lid, 'predict'))
        encoding = investigate_encoding_and_dialect(
            dataset=dataset,
            sep=delimiter, ui=ui,
            fast=fast_mode,
            encoding=encoding,
            skip_dialect=skip_dialect,
            output_delimiter=output_delimiter)
        if auto_sample:
            #  override n_sample
            n_samples = auto_sampler(dataset, encoding, ui)
            ui.info('auto_sample: will use batches of {} rows'
                    ''.format(n_samples))
        # Make a sync request to check authentication and fail early
        first_row = peek_row(dataset, delimiter, ui, fast_mode, encoding)
        ui.debug('First row for auth request: {}'.format(first_row))
        if fast_mode:
            chunk_formatter = fast_to_csv_chunk
        else:
            chunk_formatter = slow_to_csv_chunk
        first_row_data = chunk_formatter(first_row.data, first_row.fieldnames)
        first_row = first_row._replace(data=first_row_data)

        if keep_cols:
            # If any columns appear in `keep_cols` that are not in
            # `first_row.fieldnames`, it is a fatal error.
            extra_cols = set(keep_cols) - set(first_row.fieldnames)
            if extra_cols:
                msg = 'keep_cols "{}" not in columns {}.'.format(
                    list(sorted(extra_cols)),
                    first_row.fieldnames)
                ui.fatal(msg)

        if not dry_run:

            if not (api_token or import_id):
                try:
                    api_token = acquire_api_token(base_url, base_headers, user,
#......... remainder of the code omitted .........
Developer: leigh-johnson, Project: batch-scoring, Lines: 103, Source: batch_scoring.py

Example 14: run_guesses

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
def run_guesses(
	hash_comp, cracker, algorithm,
	prefix="", postfix="",
	progress=False, debug=False
):
	iter_stopped = False
	threads_running = []
	total_guessed = 0
	total_started = 0

	proc_manager = SyncManager()
	proc_manager.start()
	proc_returns = proc_manager.Queue(config.MAX_THREADS)

	cracker.initialize()

	while True:
		# check return queue, and quit if the hash has completed cracking
		ret = None
		while not proc_returns.empty():
			thread_guessed, answer = proc_returns.get()
			total_guessed += thread_guessed
			if answer is not False:
				if debug: print("WIN:", answer)
				for thread in threads_running:
					thread.terminate()
				ret = answer

		if ret is not None:
			proc_manager.shutdown()
			return (ret, total_guessed)

		# check if any threads terminated
		dead_threads = 0
		for i in range(len(threads_running)):
			if not threads_running[i-dead_threads].is_alive():
				threads_running.pop(i-dead_threads).terminate()
				if progress: print("GUESSED SO FAR:", total_guessed)
				dead_threads += 1

		# spawn as many new threads as you can
		threads_spawned = 0
		if debug: print(iter_stopped, threads_running, config.MAX_THREADS)
		while not iter_stopped and len(threads_running) < config.MAX_THREADS:
			try:
				guesses = next(cracker)
				total_started += cracker.max_per_thread()

			except StopIteration:
				iter_stopped = True
				break

			p = guess_thread(
				proc_manager, proc_returns,
				hash_comp, guesses, algorithm,
				prefix, postfix
			)
			p.start()
			threads_running.append(p)

			threads_spawned += 1
			del guesses

		if iter_stopped and len(threads_running) == 0 and proc_returns.empty():
			return (False, total_guessed)
		elif threads_spawned == 0:
			time.sleep(config.WAIT_TIME)
			continue
Developer: BCable, Project: threaded_crack, Lines: 70, Source: threaded_crack.py

Example 15: StorageManager

# Module to import: from multiprocessing.managers import SyncManager [as alias]
# Or: from multiprocessing.managers.SyncManager import start [as alias]
class StorageManager(object):
    '''
    Manager for per sensor persistent storage.

    Uses `multiprocessing.managers.SyncManager` to give sensors access
    to a dict-like structure, which automagically synchronizes with
    the main process.

    Values are stored in sqlite as stringified JSON documents.
    '''
    # TODO #1505: check if storage manager is pickable
    def __init__(self, sqlite_factory):
        '''
        Initializes sync manager and logger.
        '''
        self.log = logger(self.__class__.__name__)
        self.storages = {}
        self.manager = SyncManager()

        def ignore_signals():
            '''
            Ignores SIGINT and SIGTERM.
            We don't want them propagated to SyncManager, because
            we want to store its state to disk on Agent shutdown.
            '''
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            signal.signal(signal.SIGTERM, signal.SIG_IGN)

        self.manager.start(ignore_signals)

        self.ppid = None

        self.sqliteconn = sqlite_factory()

    def get_storage(self, name):
        '''
        Retrieves storage for given name. If such storage doesn't exist,
        a new one, possibly with data got from sqlite, will be created.

        Note that name is not necessarily sensor's name. In fact,
        most of the time it will be sensor_name+hash(sensor_config).
        This way we can differentiate storages within one sensor type.
        '''
        self.log.debug('Storage requested for `{}`'.format(name))

        if name in self.storages:
            return self.storages[name]

        cursor = self.sqliteconn.cursor()
        cursor.execute("SELECT value FROM sensorstorage WHERE key=?", (name,))
        try:
            storage_data = json.loads(cursor.fetchone()[0])  # row is a 1-tuple
        # Catching too general exception
        # pylint: disable=W0703
        except Exception:
            storage_data = {}
        # Instance of 'SyncManager' has no 'dict' member
        # pylint: disable=E1101
        self.storages[name] = self.manager.dict(storage_data)
        return self.storages[name]

    def shutdown(self):
        '''
        Flushes all remaining storages into sqlite
        and shuts down manager.
        '''
        self.log.debug('Storage manager: Started shutdown')

        cursor = self.sqliteconn.cursor()
        for sensor, store in self.storages.items():
            cursor.execute(
                'INSERT OR REPLACE INTO sensorstorage'
                ' (key, value) VALUES (?,?)',
                (sensor, json.dumps(dict(store))),
            )
        self.sqliteconn.commit()
        self.sqliteconn.close()
        self.manager.shutdown()

        self.log.debug('Storage manager: Finished shutdown')
Developer: whitehats, Project: monitowl-agent, Lines: 82, Source: agent.py
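A hypothetical usage sketch, assuming an sqlite database that already contains the sensorstorage(key, value) table the class reads and writes:

import sqlite3

mgr = StorageManager(lambda: sqlite3.connect('agent.db'))  # path illustrative
store = mgr.get_storage('tempsensor-1f2e')                 # name illustrative
store['last_reading'] = 21.5                               # manager-backed dict proxy
mgr.shutdown()                                             # flushes storages to sqlite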


Note: the multiprocessing.managers.SyncManager.start examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub/MSDocs and similar platforms; the snippets come from open-source projects, and copyright in the source code remains with the original authors. For distribution and use, refer to each project's License; do not republish without permission.