Python pool.ThreadPool Class Code Examples

This article collects typical usage examples of Python's multiprocessing.pool.ThreadPool class. If you are wondering what the ThreadPool class is for, how to use it, or what real-world code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the ThreadPool class, sorted by popularity by default.
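Before the examples, here is a minimal, self-contained sketch (written for this article, not taken from any project below) of the two ThreadPool entry points the examples lean on most: map, which applies a function to every item of an iterable and returns the results in input order, and apply_async, which schedules a single call and hands back an AsyncResult.

from multiprocessing.pool import ThreadPool

def fetch(n):
    # Stand-in for I/O-bound work (network call, disk read, ...).
    return n * n

pool = ThreadPool(processes=4)  # four worker threads

# map blocks until every item is processed; results keep input order.
squares = pool.map(fetch, range(10))

# apply_async schedules one call and returns an AsyncResult to poll later.
async_result = pool.apply_async(fetch, args=(42,))
answer = async_result.get()  # blocks until the result is ready

pool.close()  # accept no more tasks
pool.join()   # wait for the workers to exit

print(squares, answer)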

Example 1: _test_herd_management

    def _test_herd_management(self, cache):
        globals()['call_count'] = 0

        def test_callable(v):
            global call_count
            call_count += 1

            sleep(0.1)
            return v

        pool = ThreadPool(processes=10)
        processes = []
        for _ in xrange(10):
            to_func = {
                'key': self._test_key,
                'callback': test_callable,
                'callback_params': {'v': 17},
            }

            async_result = pool.apply_async(
                cache.get_or_set, kwds=to_func
            )
            processes.append(async_result)

        results = []
        for thread in processes:
            thread.wait()
            results.append(thread.get())

        # Checking that callable method was applied only once
        self.assertEqual(globals()['call_count'], 1)

        # Checking results - they all should be the same
        self.assertEqual(results, [17] * 10)
Author: lemurchik, Project: django-extmemcachedcache, Lines: 34, Source: tests.py
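The get_or_set implementation under test is not shown here; as a rough sketch of the idea it verifies (a hypothetical in-process simplification, not django-extmemcachedcache's actual code), herd management means that concurrent requests for the same missing key trigger the expensive callback only once:

import threading

class HerdSafeCache(object):
    """Hypothetical cache: the callback runs once per missing key,
    however many threads ask for that key at the same time."""

    def __init__(self):
        self._data = {}
        self._lock = threading.Lock()

    def get_or_set(self, key, callback, callback_params):
        with self._lock:
            if key not in self._data:
                # Only the first thread to take the lock computes the
                # value; the other threads find it already cached.
                self._data[key] = callback(**callback_params)
            return self._data[key]

A real memcached-backed implementation would typically use a placeholder value or an add-based sentinel rather than a process-local lock, but the observable behaviour the test asserts (call_count == 1, ten identical results) is the same.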

Example 2: _test_monitor_tables_locking_errors

	def _test_monitor_tables_locking_errors(self):
		"""Test that intensive read/write operations to the MySQL Monitor tables
		do not trigger locking errors.

		This test will be successful if there will be no generated error at
		the end.
		"""

		# Setting these variables will cause the Monitor to connect more
		# frequently to the backend hosts to check their health, thus increasing
		# the probability of locking errors to appear.
		self.run_query_proxysql_admin("UPDATE global_variables SET variable_value=100 WHERE variable_name='mysql-monitor_connect_interval'")
		self.run_query_proxysql_admin("UPDATE global_variables SET variable_value=100 WHERE variable_name='mysql-monitor_ping_interval'")
		self.run_query_proxysql_admin("LOAD MYSQL VARIABLES TO RUNTIME")

		queries = []
		q1 = "select * from monitor.mysql_server_connect_log ORDER BY RANDOM() LIMIT 10"
		q2 = "select * from monitor.mysql_server_ping_log ORDER BY RANDOM() LIMIT 10"
		for _ in xrange(10000):
			queries.append(random.choice([q1, q2]))

		pool = ThreadPool(processes=5)
		pool.map(self.run_query_proxysql_admin, queries)

		# If we reached this point without an error, it means that the test
		# has passed.
		self.assertEqual(1, 1)
Author: AlexeyDeyneko, Project: proxysql, Lines: 27, Source: admin_tables_test.py

Example 3: test_threadsafe

    def test_threadsafe(self):
        # Ensure that the eventdb is thread-safe by hammering on it with
        # multiple threads simultaneously. We should only get one positive.
        pool = ThreadPool(10)
        results = pool.map(self.event_db.check_event, repeat(self.event, 1000))
        self.assertEqual(results.count(True), 1)
        self.assertEqual(results.count(False), 999)
Author: jdswinbank, Project: Comet, Lines: 7, Source: test_event_db.py

Example 4: __init__

    def __init__(self):
        super(Foo,self).__init__(400,400)
        l = pyglet.text.Label('FOOBAR',font_name="Courier Sans",font_size=20,x=self.width//2,y=self.height//2,multiline=True,width=200)


        pool = ThreadPool(processes=1)
        self.r = pool.apply_async(foo)


        @self.event
        def on_key_press(s,m):
            if s == pyglet.window.key.C:
                print("EXTERNAL")
                l.text = self.r.get()


        @self.event
        def on_draw():
            self.clear()
            
            l.draw()
            count = 10
            offset = (2 * pi) / 10.0
            for i in range(count):
                line((200,200),(200+cos(offset*i)*100,200+sin(offset*i)*100))

        pyglet.app.run()
Author: xoryouyou, Project: NetArgos, Lines: 27, Source: pool.py

Example 5: fetch_plugins

def fetch_plugins(old_index):
    ans = {}
    pool = ThreadPool(processes=10)
    entries = tuple(parse_index())
    result = pool.map(partial(parallel_fetch, old_index), entries)
    for entry, plugin in zip(entries, result):
        if isinstance(plugin, dict):
            ans[entry.name] = plugin
        else:
            if entry.name in old_index:
                ans[entry.name] = old_index[entry.name]
            log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
            log(plugin)
    # Move staged files
    for plugin in ans.itervalues():
        if plugin['file'].startswith('staging_'):
            src = plugin['file']
            plugin['file'] = src.partition('_')[-1]
            os.rename(src, plugin['file'])
    raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
    atomic_write(raw, PLUGINS)
    # Cleanup any extra .zip files
    all_plugin_files = {p['file'] for p in ans.itervalues()}
    extra = set(glob.glob('*.zip')) - all_plugin_files
    for x in extra:
        os.unlink(x)
    return ans
Author: BatteringRam, Project: calibre, Lines: 27, Source: plugins_mirror.py
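Examples 5 and 6 both rely on the same trick: pool.map passes exactly one argument per item, so functools.partial is used to pre-bind the remaining arguments. A standalone illustration (with a hypothetical fetch function, not calibre's parallel_fetch):

from functools import partial
from multiprocessing.pool import ThreadPool

def fetch(old_index, entry):
    # Two-argument worker; map() can only supply `entry`.
    return (entry in old_index, entry)

old_index = {'a': 1}
pool = ThreadPool(processes=4)
results = pool.map(partial(fetch, old_index), ['a', 'b'])
pool.close()
pool.join()
print(results)  # [(True, 'a'), (False, 'b')]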

Example 6: analyze_commits

def analyze_commits(project_name, target_repo, existing_target_branches, fork_list):
    print 'Analyzing commits'

    pool = ThreadPool(processes=10)

    existing_target_commits = []

    for fork_repo in fork_list:
        for target_branch in existing_target_branches:

            print '    Analyzing %s (branch: %s) ' % (fork_repo.full_name, target_branch),
            fork_repo_commits = fork_repo.get_commits(sha=target_branch)

            max_commits_to_analyze = 30
            analyzed_commits = 0

            fork_commits_to_analyze = []

            for fork_comm in fork_repo_commits:
                if analyzed_commits == max_commits_to_analyze:
                    break

                fork_commits_to_analyze.append(fork_comm)

                analyzed_commits += 1

            partial_c_in_root = functools.partial(commit_is_in_root,
                                                  existing_target_commits,
                                                  target_repo, fork_repo)

            pool.map(partial_c_in_root, fork_commits_to_analyze)
            print
Author: andresriancho, Project: w3af-misc, Lines: 32, Source: fork_info.py

Example 7: _fit

    def _fit(self, dataset):
        est = self.getOrDefault(self.estimator)
        epm = self.getOrDefault(self.estimatorParamMaps)
        numModels = len(epm)
        eva = self.getOrDefault(self.evaluator)
        tRatio = self.getOrDefault(self.trainRatio)
        seed = self.getOrDefault(self.seed)
        randCol = self.uid + "_rand"
        df = dataset.select("*", rand(seed).alias(randCol))
        condition = (df[randCol] >= tRatio)
        validation = df.filter(condition).cache()
        train = df.filter(~condition).cache()

        def singleTrain(paramMap):
            model = est.fit(train, paramMap)
            metric = eva.evaluate(model.transform(validation, paramMap))
            return metric

        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
        metrics = pool.map(singleTrain, epm)
        train.unpersist()
        validation.unpersist()

        if eva.isLargerBetter():
            bestIndex = np.argmax(metrics)
        else:
            bestIndex = np.argmin(metrics)
        bestModel = est.fit(dataset, epm[bestIndex])
        return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
Author: Altiscale, Project: spark, Lines: 29, Source: tuning.py
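A detail this example depends on: ThreadPool.map returns results in the same order as the input iterable, regardless of which worker finishes first, which is why np.argmax(metrics) is a valid index into epm. A quick standalone check (not part of the Spark source):

import random
import time
from multiprocessing.pool import ThreadPool

def jittered(i):
    time.sleep(random.random() * 0.05)  # workers finish in random order
    return i

pool = ThreadPool(processes=8)
assert pool.map(jittered, range(20)) == list(range(20))  # order preserved
pool.close()
pool.join()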

Example 8: bench_compression_comparison

def bench_compression_comparison(n_chunks, df_length, append_mul, pool_size, pool_step, repeats,
                                 use_raw_lz4, use_HC):
    _str = construct_test_data(df_length, append_mul)
    chunk_size = len(_str) / 1024 ** 2.0
    _strarr = [_str] * n_chunks

    # Single threaded
    # ---------------
    measurements = bench_single(repeats, _strarr, use_HC)
    print_results(1, chunk_size, n_chunks, chunk_size*n_chunks, measurements)
    single_mean = np.mean(measurements)

    # Multi-threaded
    # --------------
    for sz in range(2, pool_size + 1, pool_step):
        if use_raw_lz4:
            pool = ThreadPool(sz)
        else:
            pool = None
            c.set_compression_pool_size(sz)
        measurements = bench_multi(repeats, _strarr, use_HC, pool=pool)
        print_results(sz, chunk_size, n_chunks, chunk_size * n_chunks, measurements, compare=single_mean)
        if pool:
            pool.close()
            pool.join()
    print("")
Author: manahl, Project: arctic, Lines: 26, Source: benchmark_lz4.py

Example 9: _load_lyrics

    def _load_lyrics(self, songdict):
        total = []
        for songlist in songdict.values():
            total += songlist

        pool = ThreadPool()
        pool.map(Song.load, total)
Author: ajm188, Project: fugl, Lines: 7, Source: tswizzle.py

Example 10: copytree_and_gzip

    def copytree_and_gzip(self, source_dir, target_dir):
        """
        Copies the provided source directory to the provided target directory.

        Gzips JavaScript, CSS and HTML and other files along the way.
        """
        # Figure out what we're building...
        build_list = []
        # Walk through the source directory...
        for (dirpath, dirnames, filenames) in os.walk(source_dir):
            for f in filenames:
                # Figure out what is going where
                source_path = os.path.join(dirpath, f)
                rel_path = os.path.relpath(dirpath, source_dir)
                target_path = os.path.join(target_dir, rel_path, f)
                # Add it to our list to build
                build_list.append((source_path, target_path))

        logger.debug("Gzipping {} files".format(len(build_list)))

        # Build em all
        if not getattr(self, 'pooling', False):
            [self.copyfile_and_gzip(*u) for u in build_list]
        else:
            cpu_count = multiprocessing.cpu_count()
            logger.debug("Pooling build on {} CPUs".format(cpu_count))
            pool = ThreadPool(processes=cpu_count)
            pool.map(self.pooled_copyfile_and_gzip, build_list)
Author: datadesk, Project: django-bakery, Lines: 28, Source: build.py

Example 11: run_command

    def run_command(self, command, *args, **opts):
        if len(self.members) <= 0:
            raise TomcatError("Cluster has no members")
        hosts = opts.setdefault('hosts', self.members.keys())
        threads = opts.setdefault('threads',
                      min(self.member_count(), self.max_threads))
        abort_on_error = opts.setdefault('abort_on_error', False)
        if abort_on_error:
            abort = Value('b', 0)

        def run_cmd(host):
            try:
                if abort_on_error and abort.value:
                    raise TomcatError('Aborted')
                self.log.debug("Performing %s%s on %s", command, args, host)
                self._run_progress_callback(event=events.CMD_START,
                        command=command, args=args, node=host)

                rv = getattr(self.members[host], command)(*args)

                self._run_progress_callback(event=events.CMD_END,
                        command=command, args=args, node=host)
            except Exception as e:
                if abort_on_error:
                    abort.value = True
                rv = e
            return (host, rv)

        pool = ThreadPool(processes=threads)
        return ClusterCommandResults(pool.map(run_cmd, hosts))
Author: mvalenzuelaDandB, Project: pytomcat, Lines: 30, Source: __init__.py
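The abort flag here is a multiprocessing.Value, which works across threads too; but since ThreadPool workers share the parent's memory, a plain threading.Event achieves the same early-abort pattern without shared-memory machinery. A simplified sketch (hypothetical, not pytomcat's code):

import threading
from multiprocessing.pool import ThreadPool

abort = threading.Event()

def run_cmd(host):
    if abort.is_set():
        return (host, 'aborted')
    try:
        if host == 'bad':
            raise RuntimeError('boom')
        return (host, 'ok')
    except Exception as e:
        abort.set()  # tell tasks that have not started yet to bail out
        return (host, e)

pool = ThreadPool(processes=2)
print(pool.map(run_cmd, ['h1', 'bad', 'h2', 'h3']))
pool.close()
pool.join()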

Example 12: resolve_playlist

def resolve_playlist(url):
    resolve_pool = ThreadPool(processes=16)
    logger.info("Resolving YouTube-Playlist '%s'", url)
    playlist = []

    page = 'first'
    while page:
        params = {
            'playlistId': url,
            'maxResults': 50,
            'key': yt_key,
            'part': 'contentDetails'
        }
        if page and page != "first":
            logger.debug("Get YouTube-Playlist '%s' page %s", url, page)
            params['pageToken'] = page

        result = session.get(yt_api_endpoint+'playlistItems', params=params)
        data = result.json()
        page = data.get('nextPageToken')

        for item in data["items"]:
            video_id = item['contentDetails']['videoId']
            playlist.append(video_id)

    playlist = resolve_pool.map(resolve_url, playlist)
    resolve_pool.close()
    return [item for item in playlist if item]
Author: Chateaudur, Project: mopidy-youtube, Lines: 28, Source: backend.py

Example 13: StartInfrastructure

	def StartInfrastructure(inf_id, auth):
		"""
		Start all virtual machines in an infrastructure previously stopped.

		Args:

		- inf_id(str): infrastructure id.
		- auth(Authentication): parsed authentication tokens.

		Return(str): error messages; empty string means all was ok.
		"""

		InfrastructureManager.logger.info("Starting the infrastructure id: " + str(inf_id))

		sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
		exceptions = []
		if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
			pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
			pool.map(
				lambda vm: InfrastructureManager._start_vm(vm, auth, exceptions), 
				reversed(sel_inf.get_vm_list())
				)
		else:
			for vm in sel_inf.get_vm_list():
				InfrastructureManager._start_vm(vm, auth, exceptions)

		if exceptions:
			msg = ""
			for e in exceptions:
				msg += str(e) + "\n"
			raise Exception("Error starting the infrastructure: %s" % msg)

		InfrastructureManager.logger.info("Infrastructure successfully restarted")
		return ""
Author: lxhiguera, Project: im, Lines: 34, Source: InfrastructureManager.py
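A side note this example quietly relies on: ThreadPool workers run in the same process, so the mapped function is never pickled, and both the lambda and the shared exceptions list just work. The process-based multiprocessing.Pool would reject the lambda with a PicklingError and would not share the list. A quick standalone demonstration (not from the im source):

from multiprocessing.pool import ThreadPool

results = []
pool = ThreadPool(processes=4)
# Lambdas and closures are fine here because nothing is pickled;
# multiprocessing.Pool would raise a PicklingError on this line.
pool.map(lambda x: results.append(x * 2), [1, 2, 3])
pool.close()
pool.join()
print(sorted(results))  # [2, 4, 6] -- the shared list was mutated in place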

Example 14: monitorthread

    def monitorthread(self):
        stock_codes = []
        for item in self.conn.mystock.todaydata.find():
            stock_codes.append(item['code'])
        pool = ThreadPool(40)
        pool.map(self.proxy, stock_codes)
Author: tuoxie119, Project: stocktrade, Lines: 7, Source: fenxing.py

Example 15: collect_logs

  def collect_logs(self):
    """Collect all the microservice log files."""
    log_dir = os.path.join(self.options.log_dir, 'service_logs')
    if not os.path.exists(log_dir):
      os.makedirs(log_dir)

    def fetch_service_log(service):
      try:
        logging.debug('Fetching logs for "%s"...', service)
        deployer = (self if service in HALYARD_SERVICES
                    else self.__spinnaker_deployer)
        deployer.do_fetch_service_log_file(service, log_dir)
      except Exception as ex:
        message = 'Error fetching log for service "{service}": {ex}'.format(
            service=service, ex=ex)
        if ex.message.find('No such file') >= 0:
          message += '\n    Perhaps the service never started.'
          # dont log since the error was already captured.
        else:
          logging.error(message)
          message += '\n{trace}'.format(
              trace=traceback.format_exc())

        write_data_to_secure_path(
            message, os.path.join(log_dir, service + '.log'))

    logging.info('Collecting server log files into "%s"', log_dir)
    all_services = list(SPINNAKER_SERVICES)
    all_services.extend(HALYARD_SERVICES)
    thread_pool = ThreadPool(len(all_services))
    thread_pool.map(fetch_service_log, all_services)
    thread_pool.terminate()
Author: jtk54, Project: spinnaker, Lines: 32, Source: validate_bom__deploy.py
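Note the shutdown style: this example calls terminate(), which stops the worker threads immediately; that is safe here only because the blocking map() call has already returned every result. The gentler idiom, seen in Example 12, is close() followed by join(). A minimal sketch of the graceful version:

from multiprocessing.pool import ThreadPool

pool = ThreadPool(4)
results = pool.map(str.upper, ['a', 'b', 'c'])  # blocks until all work is done

pool.close()  # refuse new tasks
pool.join()   # wait for the workers to exit cleanly

# pool.terminate(), by contrast, tears the pool down without waiting.
print(results)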


Note: The multiprocessing.pool.ThreadPool class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors; consult each project's License before redistributing or reusing it. Do not reproduce this article without permission.