

Python Manager.values Method Code Examples

This article collects and summarizes typical usage examples of the Python multiprocessing.Manager.values method, drawn from open-source projects. If you are unsure what Manager.values does or how to call it, the curated examples below should help; you can also explore further usage examples for multiprocessing.Manager.


Four code examples of the Manager.values method are shown below, sorted by popularity. Strictly speaking, a Manager instance has no values method of its own: in every example, values() is called on a shared dict proxy created with Manager().dict().
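As a warm-up, here is a minimal, self-contained sketch of the pattern all four examples share: create a Manager, hand its dict proxy to worker processes, and read the collected results back with values(). The worker logic is invented for illustration.

from multiprocessing import Manager, Process

def worker(shared, job_id):
    # Each worker writes its result under its own key in the shared dict
    shared[job_id] = job_id * 10

if __name__ == '__main__':
    with Manager() as manager:
        shared = manager.dict()          # proxy to a dict held by the manager process
        jobs = [Process(target=worker, args=(shared, i)) for i in range(4)]
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
        # values() on the proxy behaves like dict.values() on a snapshot of the dict
        print(sorted(shared.values()))   # [0, 10, 20, 30]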

Example 1: parallel_query

# Required import: from multiprocessing import Manager [as alias]
# Alternatively: from multiprocessing.Manager import values [as alias]
def parallel_query(self, low_res_patches):
    """Query the high resolution patches for the given low resolution patches
    using multiprocessing.

    Requires: import multiprocessing; import numpy as np;
    from multiprocessing import Manager, Process.
    @param low_res_patches: given low resolution patches
    @type low_res_patches: L{numpy.array}
    @return: high resolution patches in row vector form
    @rtype: L{numpy.array}
    """
    if self._need_update:
        self._update()
    cpu_count = multiprocessing.cpu_count()
    patch_number, patch_dimension = np.shape(low_res_patches)
    # Integer division (// rather than /) keeps the slice bounds ints on Python 3
    batch_number = patch_number // cpu_count + 1
    jobs = []
    result = Manager().dict()  # shared dict proxy collecting each worker's output
    for job_id in range(cpu_count):
        batch = low_res_patches[job_id*batch_number:(job_id+1)*batch_number, :]
        job = Process(target=self.query, args=(batch, job_id, result))
        jobs.append(job)
        job.start()
    for job in jobs:
        job.join()
    # values() gathers every worker's output from the proxy; it reflects insertion
    # (i.e. completion) order, so sort by key if the row order must match the input
    high_res_patches = np.concatenate(result.values())
    return high_res_patches
Developer: schen59, Project: SRLab, Lines of code: 26, Source file: sr_dataset.py
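The snippet delegates to a query method that is not part of this excerpt. A hypothetical sketch of such a worker, where _lookup_high_res is a placeholder name invented for illustration:

def query(self, batch, job_id, result):
    """Hypothetical worker: resolve one batch of low-res patches and store
    the high-res result in the shared Manager dict under job_id."""
    # self._lookup_high_res is a placeholder, not part of the original source
    result[job_id] = self._lookup_high_res(batch)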

Example 2: Analyzer

# Required import: from multiprocessing import Manager [as alias]
# Alternatively: from multiprocessing.Manager import values [as alias]

#......... part of the code is omitted here .........

        # Collate process-specific dicts to main dicts
        with self.lock:
            for key, value in anomaly_breakdown.items():
                if key not in self.anomaly_breakdown:
                    self.anomaly_breakdown[key] = value
                else:
                    self.anomaly_breakdown[key] += value

            for key, value in exceptions.items():
                if key not in self.exceptions:
                    self.exceptions[key] = value
                else:
                    self.exceptions[key] += value

    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Wait for all spawned processes to finish (join also reaps zombies)
            for p in pids:
                p.join()

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname( __file__ ), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(self.exceptions.values())))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % self.exceptions)
            logger.info('anomaly breakdown :: %s' % self.anomaly_breakdown)

            # Log to Graphite
            if settings.GRAPHITE_HOST != '':
                host = settings.GRAPHITE_HOST.replace('http://', '')
                system('echo skyline.analyzer.run_time %.2f %s | nc -w 3 %s 2003' % ((time() - now), now, host))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                if settings.GRAPHITE_HOST != '':
                    host = settings.GRAPHITE_HOST.replace('http://', '')
                    system('echo skyline.analyzer.duration %.2f %s | nc -w 3 %s 2003' % (time_human, now, host))
                    system('echo skyline.analyzer.projected %.2f %s | nc -w 3 %s 2003' % (projected, now, host))


            # Reset counters
            self.anomalous_metrics[:] = []
            self.exceptions = Manager().dict()
            self.anomaly_breakdown = Manager().dict()

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
Developer: HeTyHuka, Project: skyline, Lines of code: 104, Source file: analyzer.py
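The excerpt omits the class definition and initializer. Below is a plausible sketch, reconstructed only from the attributes the run loop touches; the base class and constructor are assumptions, not the project's actual code.

from multiprocessing import Lock, Manager, Process
from redis import StrictRedis

import settings  # skyline's settings module, as used throughout the excerpt

class Analyzer(Process):  # assumption: the excerpt never shows the base class
    def __init__(self):
        super(Analyzer, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
        self.lock = Lock()                          # guards the collation step
        self.anomalous_metrics = Manager().list()   # shared (score, name, ...) records
        self.exceptions = Manager().dict()          # exception type -> count
        self.anomaly_breakdown = Manager().dict()   # algorithm -> anomaly count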

Example 3: __init__

# Required import: from multiprocessing import Manager [as alias]
# Alternatively: from multiprocessing.Manager import values [as alias]
import logging
import urllib.request
from datetime import datetime
from multiprocessing import Manager, Value
from multiprocessing.pool import ThreadPool


class EntryPoint:
    Log = logging.getLogger(__name__)

    def __init__(self):
        self.__total_size = Value('i', 0)
        self.__sizes_by_file = Manager().dict()

    def main(self):
        urls = ['https://code.jquery.com/jquery-git.js',
                'https://code.jquery.com/jquery-3.1.0.js',
                'https://code.jquery.com/jquery-3.0.0.js',
                'https://code.jquery.com/jquery-2.2.0.js',
                'https://code.jquery.com/jquery-2.1.0.js',
                'https://code.jquery.com/jquery-2.0.0.js',
                'https://code.jquery.com/jquery-1.12.0.js',
                'https://code.jquery.com/jquery-1.11.0.js',
                'https://code.jquery.com/jquery-1.10.0.js',
                'https://code.jquery.com/jquery-1.9.0.js',
                'https://code.jquery.com/jquery-1.7.0.js',
                'https://code.jquery.com/jquery-1.6.js',
                'https://code.jquery.com/jquery-1.5.js',
                'https://code.jquery.com/jquery-1.4.js',
                'https://code.jquery.com/jquery-1.3.js',
                'https://code.jquery.com/jquery-1.2.js',
                'https://code.jquery.com/jquery-1.1.js',
                'https://code.jquery.com/jquery-1.0.js']

        self.__compute_serially(urls)
        self.__compute_with_threadpool(urls)

    def __compute_serially(self, urls):
        start_time = datetime.utcnow()

        sizes_by_file = dict()
        for url in urls:
            sizes_by_file[url] = self.__get_size_of_file(url)
        self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(sizes_by_file.values())))

        time_diff = datetime.utcnow() - start_time
        self.Log.info("Serial version took: {0}".format(self.get_timespan(time_diff.seconds)))

    def __compute_with_threadpool(self, urls):
        start_time = datetime.utcnow()

        pool = ThreadPool(processes=8)
        pool.map(self.__get_size_of_file_in_parallel, urls)
        self.Log.info('Total size of all {0} URLs: {1}'.format(len(urls), sum(self.__sizes_by_file.values())))

        time_diff = datetime.utcnow() - start_time
        self.Log.info("Threadpool version took: {0}".format(self.get_timespan(time_diff.seconds)))

    def __get_size_of_file_in_parallel(self, url):
        self.__sizes_by_file[url] = self.__get_size_of_file(url)
        # with self.__total_size.get_lock():
        #    self.__total_size.value += self.__get_size_of_file(url)

    @staticmethod
    def __get_size_of_file(url):
        with urllib.request.urlopen(url) as f:
            contents = f.read()
        return len(contents)

    @staticmethod
    def get_timespan(seconds):
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return "%d:%02d:%02d" % (h, m, s)
Developer: russcollier, Project: SamplesAndNuggets, Lines of code: 69, Source file: threadpool_example.py
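A possible way to run the example, assuming a basic logging setup (the source does not include an entry point):

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    EntryPoint().main()

Since ThreadPool workers share the parent's memory, a plain dict guarded by a lock would also work here; the Manager().dict() proxy simply means the same code keeps working if the ThreadPool is swapped for a process Pool.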

Example 4: Analyzer

# Required import: from multiprocessing import Manager [as alias]
# Alternatively: from multiprocessing.Manager import values [as alias]

#......... part of the code is omitted here .........
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Wait for all spawned processes to finish (join also reaps zombies)
            for p in pids:
                p.join()

            # Send alerts
            #if settings.ENABLE_ALERTS:
            #    for alert in settings.ALERTS:
            #        for metric in self.anomalous_metrics:
            #            if alert[0] in metric[1]:
            #                try:
            #                    last_alert = self.redis_conn.get('last_alert.' + metric[1])
            #                    if not last_alert:
            #                        self.redis_conn.setex('last_alert.' + metric[1], alert[2], packb(metric[0]))
            #                        self.send_mail(alert, metric)
            #                except Exception as e:
            #                    logger.error("couldn't send alert: %s" % e)

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname( __file__ ), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)
            
            # process anomalous metrics
            for metric in self.anomalous_metrics:
                try:
                    last_save_key = 'last_save.%s.%s' % (metric[1], metric[2])
                    last_save = self.redis_conn.get(last_save_key)
                    if not last_save:
                        self.redis_conn.setex(last_save_key,
                            settings.SKIP_FREQUENCY, packb(metric[0]))
                        self.storage.save(metric)
                    if settings.ENABLE_ALERTS:
                        last_alert_key = 'last_alert.' + metric[1]
                        last_alert = self.redis_conn.get(last_alert_key)
                        if not last_alert:
                            self.redis_conn.setex(last_alert_key,
                                settings.SKIP_FREQUENCY, packb(metric[0]))
                            self.alerter.add(metric)
                except Exception as e:
                    logger.error("Failed processing anomaly, pid: %s, metric: %s, error: %s",
                        getpid(), metric[1], e)
            
            # send ready alerts
            if settings.ENABLE_ALERTS:
                try:
                    self.alerter.send_alerts()
                except Exception as e:
                    logger.error("Failed sending alerts, error: %s", e)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(self.exceptions.values())))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % self.exceptions)
            logger.info('anomaly breakdown :: %s' % self.anomaly_breakdown)

            # Log to Graphite
            if settings.GRAPHITE_HOST != '':
                host = settings.GRAPHITE_HOST.replace('http://', '')
                system('echo skyline.analyzer.run_time %.2f %s | nc -w 3 %s 2003' % ((time() - now), now, host))
                system('echo skyline.analyzer.total_analyzed %d %s | nc -w 3 %s 2003' % ((len(unique_metrics) - sum(self.exceptions.values())), now, host))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                if settings.GRAPHITE_HOST != '':
                    host = settings.GRAPHITE_HOST.replace('http://', '')
                    system('echo skyline.analyzer.duration %.2f %s | nc -w 3 %s 2003' % (time_human, now, host))
                    system('echo skyline.analyzer.projected %.2f %s | nc -w 3 %s 2003' % (projected, now, host))


            # Reset counters
            self.anomalous_metrics[:] = []
            self.exceptions = Manager().dict()
            self.anomaly_breakdown = Manager().dict()

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
Developer: scalextremeinc, Project: skyline, Lines of code: 104, Source file: analyzer.py
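The last-alert logic above is a simple Redis-based rate limit: a key with a TTL marks that an alert has fired recently. Distilled into a standalone helper for clarity (the helper name is illustrative; setex and packb are used exactly as in the example):

def should_alert(redis_conn, metric_name, skip_seconds, score):
    """Return True at most once per skip_seconds for a given metric,
    using a Redis key with a TTL as the rate-limit marker."""
    key = 'last_alert.' + metric_name
    if redis_conn.get(key):
        return False                      # an alert fired recently; skip this one
    # setex stores the packed anomaly score and expires the key automatically
    redis_conn.setex(key, skip_seconds, packb(score))
    return True

Like the original, this get-then-setex sequence has a benign race if two processes check the same metric at once; at worst a duplicate alert is sent.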


Note: The multiprocessing.Manager.values examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not repost without permission.