

Python ThreadPool.close Method Code Examples

This article collects typical usage examples of the multiprocessing.pool.ThreadPool.close method in Python. If you have been wondering what exactly ThreadPool.close does or how to use it in practice, the curated examples below should help. You can also explore further usage examples of the class it belongs to, multiprocessing.pool.ThreadPool.


The sections below present 15 code examples of the ThreadPool.close method, sorted by popularity by default.
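Before the examples, a quick orientation: the canonical ThreadPool lifecycle is create, submit work, close() (after which no further tasks may be submitted), then join() (block until every queued task has finished; join() may only be called after close() or terminate()). A minimal, self-contained sketch:

from multiprocessing.pool import ThreadPool

def square(x):
    return x * x  # stand-in for real work

pool = ThreadPool(processes=4)          # four worker threads
results = pool.map(square, range(10))   # blocks until every task completes
pool.close()                            # no new tasks may be submitted
pool.join()                             # wait for the worker threads to exit
print(results)

This close()/join() pair is the pattern every example below ends with.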

Example 1: main

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
def main():
    # Run the Tales 
    pool = ThreadPool(processes=int(tcfg['Workers'].get('pool_size', 10)))
    pool.map(worker, tales)
    pool.close()
    pool.join()
Author: nickmaccarthy | Project: Tattle | Lines: 9 | Source file: tattled.py
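A side note on Example 1: since Python 3.3 a pool can also be used as a context manager, but __exit__ calls terminate() rather than close(), so results must be consumed inside the block. A rough equivalent, reusing the worker and tales names from above:

with ThreadPool(processes=int(tcfg['Workers'].get('pool_size', 10))) as pool:
    pool.map(worker, tales)  # map blocks until all results are in
# __exit__ calls terminate() here; that is safe because map already returned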

Example 2: main

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
def main(dir_path, outfile_path, is_journal=True):
    pn = 20
    flst = os.listdir(dir_path)
    arglst = []
    ret = dict()
    for i in range(pn):
        beg = int(math.ceil(float(len(flst)) / pn * i))
        end = int(math.ceil(float(len(flst)) / pn * (i + 1)))
        if i == 0:
            beg = 0
        if i == pn - 1:
            end = len(flst)
        arglst.append([dir_path, is_journal, beg, end, i, ret])
    pool = ThreadPool(pn)
    pool.map(job_map, arglst)
    pool.close()
    pool.join()
    print(80 * '=')
    print('[acmdl]: map finished')
    print(80 * '=')
    job_reduce(ret, outfile_path)
    print(80 * '=')
    print('[acmdl]: reduce finished')
    print(80 * '=')
    return
Author: dragonxlwang | Project: s3e | Lines: 27 | Source file: acm_proc.py
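The ceil arithmetic in Example 2 splits len(flst) items into pn contiguous, nearly equal index ranges. The same partitioning in isolation, with a worked case:

import math

def partition(n_items, n_chunks):
    # Yield (beg, end) index pairs; same formula as Example 2.
    for i in range(n_chunks):
        beg = int(math.ceil(float(n_items) / n_chunks * i))
        end = int(math.ceil(float(n_items) / n_chunks * (i + 1)))
        yield beg, end

print(list(partition(7, 3)))  # [(0, 3), (3, 5), (5, 7)]

This also shows why the i == 0 and i == pn - 1 clamps in Example 2 are redundant: the formula already produces 0 and n_items at the two ends.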

Example 3: local_job_runner

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
def local_job_runner(cmds_list, num_threads, throw_error=True):
    """
    Execute a list of cmds locally using thread pool with at most
    num_threads threads, wait for all jobs to finish before exit.

    If throw_error is True, when any job failed, raise RuntimeError.
    If throw_error is False, return a list of cmds that failed.

    Parameters:
      cmds_list - cmds that will be executed in ThreadPool
      num_threads - number of threads that will be used in the ThreadPool
      throw_error - whether or not to raise RuntimeError when any cmd fails.
    """
    run_cmd_in_shell = lambda x: backticks(x, merge_stderr=True)
    try:
        pool = ThreadPool(processes=num_threads)
        rets = pool.map(run_cmd_in_shell, cmds_list)
        pool.close()
        pool.join()
    except subprocess.CalledProcessError:
        pass

    failed_cmds = [cmds_list[i] for i in range(0, len(cmds_list)) if rets[i][1] != 0]
    failed_cmds_out = [rets[i][0] for i in range(0, len(cmds_list)) if rets[i][1] != 0]

    if throw_error and len(failed_cmds) > 0:
        errmsg = "\n".join(["CMD failed: %s, %s" % (cmd, out)
                            for (cmd, out) in zip(failed_cmds, failed_cmds_out)])
        raise RuntimeError(errmsg)
    else:
        return failed_cmds
Author: lpp1985 | Project: lpp_Script | Lines: 35 | Source file: RunnerUtils.py
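A hypothetical call, assuming backticks returns a tuple whose first element is the command output and whose second element is the exit code (as the indexing above implies):

# Run three shell commands on two threads; collect failures instead of raising.
failed = local_job_runner(["echo a", "echo b", "false"],
                          num_threads=2, throw_error=False)
print(failed)  # ["false"], since that command exits non-zero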

Example 4: extract

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def extract(url):
        """TODO: Docstring for extract.
        :returns: TODO

        """
        try:
            r = rs.get(url)
            soup = bs(r.text, 'html.parser')
            tr_list = soup.tbody.find_all('tr')
        except AttributeError:
            print(r)
            return None
        info_list = []
        for tr in tr_list:
            td_list = tr.find_all('td')
            # Keep only high-anonymity (u'高匿名') proxies that support HTTP and GET.
            if (td_list[2].text.strip() == u'高匿名' and
                    'HTTP' in td_list[3].text.strip(' ').split(',') and
                    'GET' in td_list[4].text.strip(' ').split(',')):
                infos = list()
                infos.append(td_list[0].text.strip())
                infos.append(td_list[1].text.strip())
                infos.append(td_list[6].text.strip()[:-1])
                if td_list[7].text.find(u'小时') != -1:  # u'小时': lifetime given in hours
                    infos.append(float(td_list[7].text[:-3]) * 3600)
                else:  # otherwise the lifetime is given in minutes
                    infos.append(float(td_list[7].text[:-3]) * 60)
                info_list.append(infos)
        p = Pool(len(info_list))
        proxy_list = p.map(wrapper, info_list)
        p.close()
        return proxy_list
Author: myme5261314 | Project: http_proxy_service | Lines: 33 | Source file: KuaidailiProxyGenerator.py

Example 5: read

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
	def read(self, sftppath, localPath = None, numParallelConnections = 1):
		if localPath is None:
			localPath = os.getcwd() # local path - can be changed later
		sftp = paramiko.SFTPClient.from_transport(self.transport)
		if (numParallelConnections > 1):
			pool = ThreadPool(numParallelConnections)

		def getFile(sftppath, localpath):
			pconnection = SFTPConnection(self.connectionInfo)
			pconnection.connect()
			psftp = paramiko.SFTPClient.from_transport(pconnection.transport)
			psftp.get(sftppath, localpath)
			psftp.close()
			pconnection.close()

		def recursiveRead(sftp, sftppath, localPath):
			fileattr = sftp.lstat(sftppath)
			if not stat.S_ISDIR(fileattr.st_mode): #it is a file
				if (numParallelConnections > 1):
					pool.apply_async(getFile, args= (sftppath, os.path.join(localPath, os.path.basename(sftppath))))
				else:
					sftp.get(sftppath, os.path.join(localPath, os.path.basename(sftppath)))
			else: #it is a directory
				try: #creating local directory, using try-catch to handle race conditions
					os.makedirs(os.path.join(localPath, os.path.basename(sftppath)))
				except OSError as exception:
					if exception.errno != errno.EEXIST:
						raise
				for file in sftp.listdir_attr(sftppath):
					recursiveRead(sftp, os.path.join(sftppath, file.filename), os.path.join(localPath, os.path.basename(sftppath)))
		recursiveRead(sftp, sftppath, localPath)
		sftp.close()
		if (numParallelConnections > 1):
			pool.close()
			pool.join()
Author: guptarajat | Project: data-connectors | Lines: 37 | Source file: sftp_connector.py
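One caveat with the apply_async pattern in Example 5: close()/join() waits for the queued tasks, but an exception raised inside getFile is captured in the discarded AsyncResult and silently lost. Keeping the results and calling get() surfaces worker failures; a minimal sketch:

from multiprocessing.pool import ThreadPool

def fetch(path):
    raise IOError("simulated download failure: %s" % path)

pool = ThreadPool(4)
results = [pool.apply_async(fetch, (p,)) for p in ("a", "b")]
pool.close()
pool.join()           # waits, but does not re-raise worker exceptions
for r in results:
    r.get()           # re-raises the IOError from the worker thread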

Example 6: check_artifact_cache

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
  def check_artifact_cache(self, vts):
    """Checks the artifact cache for the specified VersionedTargetSets.

    Returns a list of the ones that were satisfied from the cache. These don't require building.
    """
    if not vts:
      return [], []

    cached_vts = []
    uncached_vts = OrderedSet(vts)
    if self._artifact_cache and self.context.options.read_from_artifact_cache:
      pool = ThreadPool(processes=6)
      res = pool.map(lambda vt: self._artifact_cache.use_cached_files(vt.cache_key),
                     vts, chunksize=1)
      pool.close()
      pool.join()
      for vt, was_in_cache in zip(vts, res):
        if was_in_cache:
          cached_vts.append(vt)
          uncached_vts.discard(vt)
          self.context.log.info('Using cached artifacts for %s' % vt.targets)
          vt.update()
        else:
          self.context.log.info('No cached artifacts for %s' % vt.targets)
    return cached_vts, list(uncached_vts)
Author: alaattinturyan | Project: commons | Lines: 27 | Source file: __init__.py

Example 7: _run_tests

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def _run_tests(self):
        "Runs the tests, produces no report."
        run_alone = []

        tests = self._tests
        pool = ThreadPool(self._worker_count)
        try:
            for cmd, options in tests:
                options = options or {}
                if matches(self._configured_run_alone_tests, cmd):
                    run_alone.append((cmd, options))
                else:
                    self._spawn(pool, cmd, options)
            pool.close()
            pool.join()

            if run_alone:
                util.log("Running tests marked standalone")
                for cmd, options in run_alone:
                    self._run_one(cmd, **options)
        except KeyboardInterrupt:
            try:
                util.log('Waiting for currently running to finish...')
                self._reap_all()
            except KeyboardInterrupt:
                pool.terminate()
                raise
        except:
            pool.terminate()
            raise
Author: gevent | Project: gevent | Lines: 32 | Source file: testrunner.py

Example 8: _initialize_clients

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def _initialize_clients(self):
        """
        Initialize all clients.
        """
        logger.info("Initializing FDSN client(s) for %s." % ", ".join(
            _i.base_url if hasattr(_i, "base_url") else _i
            for _i in self.providers))

        def _get_client(client_name):
            # It might already be an initialized client - in that case just
            # use it.
            if isinstance(client_name, Client):
                name, client = client_name.base_url, client_name
            else:
                try:
                    this_client = Client(client_name, debug=self.debug)
                    name, client = client_name, this_client
                except utils.ERRORS as e:
                    if "timeout" in str(e).lower():
                        extra = " (timeout)"
                    else:
                        extra = ""
                    logger.warn("Failed to initialize client '%s'.%s"
                                % (client_name, extra))
                    return client_name, None

            services = sorted([_i for _i in client.services.keys()
                               if not _i.startswith("available")])
            if "dataselect" not in services or "station" not in services:
                logger.info("Cannot use client '%s' as it does not have "
                            "'dataselect' and/or 'station' services."
                            % name)
                return name, None
            return name, client

        # Catch warnings in the main thread. The catch_warnings() context
        # manager does not reliably work when used in multiple threads.
        p = ThreadPool(len(self.providers))
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            clients = p.map(_get_client, self.providers)
        p.close()
        for warning in w:
            logger.debug("Warning during initialization of one of the "
                         "clients: " + str(warning.message))

        clients = {key: value for key, value in clients if value is not None}

        # Write to initialized clients dictionary preserving order. Remember
        # that each passed provider might already be an initialized client
        # instance.
        for client in self.providers:
            if client not in clients and client not in clients.values():
                continue
            name = client.base_url if hasattr(client, "base_url") else client
            self._initialized_clients[name] = clients[name]

        logger.info("Successfully initialized %i client(s): %s."
                    % (len(self._initialized_clients),
                       ", ".join(self._initialized_clients.keys())))
Author: calum-chamberlain | Project: obspy | Lines: 62 | Source file: mass_downloader.py

Example 9: _listArtifacts

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def _listArtifacts(self, urls, gavs):
        """
        Loads maven artifacts from list of GAVs and tries to locate the artifacts in one of the
        specified repositories.

        :param urls: repository URLs where the given GAVs can be located
        :param gavs: List of GAVs
        :returns: Dictionary where the key is a MavenArtifact object and the value is its repo root URL.
        """
        def findArtifact(gav, urls, artifacts):
            artifact = MavenArtifact.createFromGAV(gav)
            for url in urls:
                if maven_repo_util.gavExists(url, artifact):
                    #Critical section?
                    artifacts[artifact] = ArtifactSpec(url)
                    return

            logging.warning('Artifact %s not found in any url!', artifact)

        artifacts = {}
        pool = ThreadPool(maven_repo_util.MAX_THREADS)
        for gav in gavs:
            pool.apply_async(findArtifact, [gav, urls, artifacts])

        # Close the pool and wait for the workers to finish
        pool.close()
        pool.join()

        return artifacts
Author: wfkbuilder | Project: maven-repository-builder | Lines: 31 | Source file: artifact_list_builder.py

Example 10: _parallel_execute

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
def _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(pabot_args['processes'])
    if "hostsfile" in pabot_args:
        hosts = [host.rstrip('\r\n') for host in open(pabot_args["hostsfile"])]
    else:
        hosts = None
    if pabot_args["verbose"]:
        print([(suite, host) for (suite, host) in TestsuitesHosts(suite_names, hosts)])
    result = pool.map_async(execute_and_wait_with,
               [(datasources,
                 outs_dir,
                 options,
                 suite,
                 pabot_args['command'],
                 pabot_args['verbose'],
                 host)
                for (suite,host) in TestsuitesHosts(suite_names, hosts)])
    pool.close()
    while not result.ready():
        # keyboard interrupt is executed in main thread and needs this loop to get time to get executed
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
    signal.signal(signal.SIGINT, original_signal_handler)
Author: tangkun75 | Project: pabot | Lines: 28 | Source file: pabot.py
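The map_async-plus-polling loop in Examples 10 and 12 exists so the main thread stays free to handle SIGINT: a blocking pool.map call would leave the keyboard_interrupt handler no chance to run promptly. The pattern in isolation:

from multiprocessing.pool import ThreadPool
import time

def job(n):
    time.sleep(0.2)
    return n

pool = ThreadPool(2)
result = pool.map_async(job, range(10))
pool.close()
while not result.ready():
    time.sleep(0.1)   # main thread stays responsive (e.g. to Ctrl-C)
print(result.get())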

Example 11: _send_some_brokers

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def _send_some_brokers(self, requests, ignore_errors=True):
        """
        Sends a request to one or more brokers. The responses are returned mapped to the broker that
        they were retrieved from. This method uses a thread pool to parallelize sends.

        Args:
            requests (dict of int -> BaseRequest): A dictionary, where keys are integer broker IDs and the
                values are valid request objects that inherit from BaseRequest.
            ignore_errors (bool): If True, a failed request is represented by a None value in the result;
                if False, the underlying ConnectionError is re-raised.

        Returns:
            dict (int -> BaseResponse): A map of broker IDs to response instances (inherited from
                BaseResponse). Failed requests are represented with a value of None
        """
        results = {}
        pool = ThreadPool(processes=self.configuration.broker_threads)
        for broker_id in requests:
            results[broker_id] = pool.apply_async(self._send_to_broker, (broker_id, requests[broker_id]))
        pool.close()
        pool.join()

        responses = {}
        for broker_id in results:
            try:
                responses[broker_id] = results[broker_id].get()
            except ConnectionError:
                if ignore_errors:
                    # Individual broker failures are OK, as we'll represent them with a None value
                    responses[broker_id] = None
                else:
                    raise
        return responses
Author: fvigotti | Project: kafka-tools | Lines: 33 | Source file: client.py

Example 12: _parallel_execute

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
def _parallel_execute(datasources, options, outs_dir, pabot_args, suite_names):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(pabot_args['processes'])
    if pabot_args.get("vectors"):
        result = pool.map_async(execute_and_wait_with,
                    [(datasources,
                     outs_dir,
                     options,
                     suite,
                     pabot_args['command'],
                     pabot_args['verbose'],
                     vector)
                    for suite in suite_names
                    for vector in pabot_args['vectors']])
    else:
        result = pool.map_async(execute_and_wait_with,
                    [(datasources,
                     outs_dir,
                     options,
                     suite,
                     pabot_args['command'],
                     pabot_args['verbose'],
                     None)
                    for suite in suite_names])
    pool.close()
    while not result.ready():
        # keyboard interrupt is executed in main thread and needs this loop to get time to get executed
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
    signal.signal(signal.SIGINT, original_signal_handler)
Author: roamingunner | Project: rf-libs | Lines: 34 | Source file: pabot.py

Example 13: get_used_properties

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def get_used_properties(self, set_ids=None, article_ids=None, **filters):
        """
        Returns a sequence of property names in use in the specified set(s) and/or article(s).
        """
        if set_ids is not None:
            filters["sets"] = set_ids

        if article_ids is not None:
            filters["ids"] = article_ids

        all_properties = self.get_properties()
        flexible_properties = set(all_properties) - set(ALL_FIELDS)

        body = {"query": {"bool": {"must": [
            build_filter(**filters),
            {"exists": {"field": "fakeprop"}}
        ]}}}

        bodies = (copy.deepcopy(body) for _ in range(len(flexible_properties)))
        pool = ThreadPool()
        results = pool.imap(self._get_used_properties, zip(bodies, flexible_properties))

        try:
            for found, prop in zip(results, flexible_properties):
                if found:
                    yield prop
        finally:
            pool.close()
Author: amcat | Project: amcat | Lines: 30 | Source file: amcates.py
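Note that imap in Example 13 returns a lazy iterator: results arrive one at a time as workers finish, which is why pool.close() sits in a finally clause after the consuming loop rather than immediately after submission. The same shape in isolation:

from multiprocessing.pool import ThreadPool

pool = ThreadPool(4)
try:
    for doubled in pool.imap(lambda x: 2 * x, range(5)):
        print(doubled)    # results are yielded lazily, in order
finally:
    pool.close()          # all results consumed; close, then join
    pool.join()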

Example 14: run

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
    def run(self):
        pool = ThreadPool(self.num_agents)
        for idx in range(self.num_agents):
            pool.apply_async(self.run_experiement, args=(self.experiment, idx))

        pool.close()
        pool.join()
Author: gandalfvn | Project: hierarchical_rl | Lines: 9 | Source file: async_rl.py

Example 15: downloadPDFs

# Module to import: from multiprocessing.pool import ThreadPool [as alias]
# Or: from multiprocessing.pool.ThreadPool import close [as alias]
	def downloadPDFs(self):
		### Download all the files extracted from the metadata
		startTime = time.strftime("%c")
		# Loop through the CSV
		f = open(self.csvpath)
		metadata = csv.reader(f, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)
		
		# Create the pool once and queue one download task per PMC ID, so
		# up to 30 downloads run in parallel.
		pool = Pool(30)
		for row in metadata:
			pmcid = row[8]
			
			### Check the input is a PMC ID
			if 'PMC' in pmcid:
				print('Starting thread for: '+pmcid)
				pool.apply_async(self.saveFile, (pmcid,))
			else:
				print('Something is wrong. '+pmcid+' is not a PMC id')
				sys.exit(0)
		
		# Wait for all queued downloads to finish.
		pool.close()
		pool.join()
		f.close()
		
		print('Finished downloading all files: start {} end {}.'.format(startTime, time.strftime("%c")))
Author: robdunne-uom | Project: et-pubmed-xml-pdf | Lines: 27 | Source file: xmlpdf.py


Note: The multiprocessing.pool.ThreadPool.close method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.