

Python pool.close Function Code Examples

This article collects typical usage examples of the close function from Python's multiprocessing.pool module. If you are wondering what close does, how to call it, or what it looks like in real-world code, the curated examples below should help.


The following presents 15 code examples of the close function, sorted by popularity by default.
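Before the examples, a minimal sketch of the canonical pattern may be useful (the worker function square is illustrative, not taken from any example below): close() tells the pool that no more tasks will be submitted, and join() then blocks until all worker processes have exited; calling join() on a pool that has not been close()d or terminate()d raises a ValueError.

import multiprocessing

def square(x):
    # Illustrative worker; defined at module top level so it can be pickled
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    results = pool.map(square, range(10))  # blocks until all results are in
    pool.close()  # no further tasks may be submitted
    pool.join()   # wait for the worker processes to exit
    print(results)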

Example 1: superheadhunter

def superheadhunter(filelist, keys, defaults=[], **kw):
    #TODO: BENCHMARK! Nchunks, Nfiles
    #TODO: OPTIMIZE?
    '''Headhunter looped over a list of files.'''
    
    Nchunks     = kw.get( 'Nchunks', 25 )
    with_parent = kw.get( 'with_parent', False )
    return_type = kw.get( 'return_type', 'list' )
    
    hunt = functools.partial(headhunter, 
                             keys=keys, 
                             Nchunks=Nchunks, 
                             return_type='raw', 
                             with_parent=False )
    
    pool = Pool()
    raw = pool.map( hunt, filelist )
    pool.close()
    #pool.join()
    
    # Flatten the twice-nested list of string matches (extending a list in a loop is the fastest way to do this)
    results = []
    for r in raw:   
        results.extend(r)
            
    return merger(results, keys, defaults, return_type)
Author: apodemus, Project: obstools, Lines: 26, Source: fastfits.py

Example 2: ScopedPool

@contextlib.contextmanager
def ScopedPool(*args, **kwargs):
  """Context Manager which returns a multiprocessing.pool instance which
  correctly deals with thrown exceptions.

  *args - Arguments to multiprocessing.pool

  Kwargs:
    kind ('threads', 'procs') - The type of underlying coprocess to use.
    **etc - Arguments to multiprocessing.pool
  """
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = _ScopedPool_initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
Author: duanwujie, Project: depot_tools, Lines: 26, Source: git_common.py
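A brief usage sketch, assuming ScopedPool is decorated with contextlib.contextmanager as above (the 'threads' path is used here because the 'procs' path relies on _ScopedPool_initer, which is not shown in this snippet):

with ScopedPool(4, kind='threads') as pool:
    print(pool.map(len, ['a', 'bb', 'ccc']))  # -> [1, 2, 3]
# On a clean exit the pool is close()d; if the body raises, it is terminate()d.
# Either way, join() runs in the finally block before the pool goes away.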

Example 3: run_trajectory

def run_trajectory(t, ps, landscape, ptv, num_iterations, num_processors):
    # Get the points in the trajectory
    points = t.points()

    # Determine the index of each unique point (sometimes points are equal due to rounding)
    uinds = [i for i, p in enumerate(points) if i == 0 or not p.equals(points[i - 1])]

    # Create a process pool, using as many processors as are available, or
    # are required to allow each point to run concurrently
    pool = mp.Pool(processes=min(num_processors, len(points)))

    results = []
    for i in uinds:
        # Modify the parameter set to match the current point
        psm = ps.copy()
        psm.modify_for_point(points[i], ptv)
        psm.convert_to_age_classes()

        # Launch a process to run the simulation(s) for the point. This modifies the point in place
        args = [points[i], psm, landscape, num_iterations, num_processors]
        results.append(pool.apply_async(run_iterations_for_point, args))

    pool.close()
    pool.join()

    # Merge the unique and non-unique points back together
    for i, r in zip(uinds, results):
        points[i] = r.get(None)

    # Return a new trajectory containing the results for each point
    return io.Trajectory(points=points)
Author: saamrenton, Project: GMBI, Lines: 31, Source: GMBI.py

Example 4: pass_data_to_search

def pass_data_to_search(symbol,path,start_time_seconds,end_time_seconds,date,time_interval,tt,code_path):

    file_name = path + 'b' + date + '.l.bz2'

    size = os.path.getsize(file_name)
    total_rows = size // 69          # each fixed-width record is 69 bytes
    total_processes1 = 40
    slots = total_rows // total_processes1

    # Process the file in parallel chunks, one chunk per worker
    
    pool = multiprocessing.Pool(total_processes1)
    

    for i in range(total_processes1):

        pool.apply_async(mapper, args = (i,slots,total_processes1,symbol,start_time_seconds,end_time_seconds,date,time_interval,file_name,tt,code_path))
        
    pool.close()
    pool.join()    
Author: kaush-utkarsh, Project: init-py-crawlers, Lines: 28, Source: merged.py

Example 5: _map_parallel

def _map_parallel(function, args, n_jobs):
    """multiprocessing.Pool(processors=n_jobs).map with some error checking"""
    # Following the error checking found in joblib
    multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
    if multiprocessing:
        try:
            import multiprocessing
            import multiprocessing.pool
        except ImportError:
            multiprocessing = None
    # 2nd stage: validate that locking is available on the system and
    #            issue a warning if not
    if multiprocessing:
        try:
            _sem = multiprocessing.Semaphore()
            del _sem  # cleanup
        except (ImportError, OSError) as e:
            multiprocessing = None
            warnings.warn('%s. _map_parallel will operate in serial mode' % (e,))
    if multiprocessing and int(n_jobs) not in (0, 1):
        if n_jobs == -1:
            n_jobs = None
        pool = multiprocessing.Pool(processes=n_jobs)
        map_result = pool.map(function, args)
        pool.close()
        pool.join()
    else:
        map_result = list(map(function, args))
    return map_result
Author: xyh-cosmo, Project: pystan, Lines: 29, Source: model.py
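A usage sketch for _map_parallel (it assumes os and warnings are imported at module level, as in the source file; square is illustrative and must be defined at module top level so the process pool can pickle it):

def square(x):
    return x * x

if __name__ == '__main__':
    print(_map_parallel(square, range(8), n_jobs=2))  # pool of 2 processes
    print(_map_parallel(square, range(8), n_jobs=1))  # serial fallback, no pool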

Example 6: parallel_compile

def parallel_compile(self, sources, output_dir=None, macros=None,
                     include_dirs=None, debug=0, extra_preargs=None,
                     extra_postargs=None, depends=None):
    """New compile function that we monkey patch into the existing compiler instance.
    """
    import multiprocessing.pool

    # Copied from the regular compile function
    macros, objects, extra_postargs, pp_opts, build = \
            self._setup_compile(output_dir, macros, include_dirs, sources,
                                depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # Set by fix_compiler
    global glob_use_njobs
    if glob_use_njobs == 1:
        # This is equivalent to regular compile function
        for obj in objects:
            _single_compile(obj)
    else:
        # Use ThreadPool, rather than Pool, since _single_compile is a local
        # closure and therefore not picklable.
        pool = multiprocessing.pool.ThreadPool(glob_use_njobs)
        pool.map(_single_compile, objects)
        pool.close()
        pool.join()

    # Return *all* object filenames, not just the ones we just built.
    return objects
Author: GalSim-developers, Project: GalSim, Lines: 35, Source: setup.py

Example 7: count_intersect

    def count_intersect(self, threshold, frequency=True):

        self.counts = OrderedDict()
        self.rlen, self.qlen = {}, {}
        self.nalist = []

        if frequency:
            self.frequency = OrderedDict()

        # if self.mode_count == "bp":
        #    print2(self.parameter, "\n{0}\t{1}\t{2}\t{3}\t{4}".format("Reference","Length(bp)", "Query", "Length(bp)", "Length of Intersection(bp)"))
        # elif self.mode_count == "count":
        #    print2(self.parameter, "\n{0}\t{1}\t{2}\t{3}\t{4}".format("Reference","sequence_number", "Query", "sequence_number", "Number of Intersection"))

        for ty in self.groupedreference.keys():
            self.counts[ty] = OrderedDict()
            self.rlen[ty], self.qlen[ty] = OrderedDict(), OrderedDict()
            if frequency:
                self.frequency[ty] = OrderedDict()

            for r in self.groupedreference[ty]:
                if r.total_coverage() == 0 and len(r) > 0:
                    self.nalist.append(r.name)
                    continue
                else:
                    self.counts[ty][r.name] = OrderedDict()
                    if self.mode_count == "bp":
                        rlen = r.total_coverage()
                    elif self.mode_count == "count":
                        rlen = len(r)
                    self.rlen[ty][r.name] = rlen

                    mp_input = []
                    for q in self.groupedquery[ty]:
                        if r.name == q.name:
                            continue
                        else:
                            mp_input.append([q, self.nalist, self.mode_count, self.qlen, threshold,
                                             self.counts, frequency, self.frequency, ty, r])
                    # q, nalist, mode_count, qlen_dict, threshold, counts, frequency, self_frequency, ty, r
                    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count() - 1)
                    mp_output = pool.map(mp_count_intersect, mp_input)
                    pool.close()
                    pool.join()

                    # qname, nalist, qlen_dict[ty][q.name], counts[ty][r.name][q.name], self_frequency[ty][q.name].append(c[2])
                    for output in mp_output:
                        if output[1]:
                            self.nalist.append(output[1])
                        else:
                            self.qlen[ty][output[0]] = output[2]
                            self.counts[ty][r.name][output[0]] = output[3]
                            # print(r.name)
                            # print(output[0])
                            # print(output[3])
                            try:
                                self.frequency[ty][output[0]][r.name] = output[3][2]
                            except KeyError:
                                self.frequency[ty][output[0]] = {}
                                self.frequency[ty][output[0]][r.name] = output[3][2]
Author: CostaLab, Project: reg-gen, Lines: 60, Source: intersection_test.py

Example 8: _CompileDeps

def _CompileDeps(aapt_path, dep_subdirs, temp_dir):
  partials_dir = os.path.join(temp_dir, 'partials')
  build_utils.MakeDirectory(partials_dir)
  partial_compile_command = [
      aapt_path + '2',
      'compile',
      # TODO(wnwen): Turn this on once aapt2 forces 9-patch to be crunched.
      # '--no-crunch',
  ]
  pool = multiprocessing.pool.ThreadPool(10)
  def compile_partial(directory):
    dirname = os.path.basename(directory)
    partial_path = os.path.join(partials_dir, dirname + '.zip')
    compile_command = (partial_compile_command +
                       ['--dir', directory, '-o', partial_path])
    build_utils.CheckOutput(compile_command)

    # Sorting the files in the partial ensures deterministic output from the
    # aapt2 link step which uses order of files in the partial.
    sorted_partial_path = os.path.join(partials_dir, dirname + '.sorted.zip')
    _SortZip(partial_path, sorted_partial_path)

    return sorted_partial_path

  partials = pool.map(compile_partial, dep_subdirs)
  pool.close()
  pool.join()
  return partials
Author: gregocyro, Project: android, Lines: 28, Source: compile_resources.py

Example 9: slippy_test

def slippy_test(test_options, width=TILE_WIDTH, height=TILE_HEIGHT, tile_factor=TILE_FACTOR):
	# assume each screen is a 10x5 grid of tiles;
	# this is approximately the OTM map size at full screen at my desk
	z = test_options['z']
	x = test_options['x']
	y = test_options['y']
	url_prefix = test_options['url_prefix']


	tiles_to_request = []
	for x_iter in range(x - width//2, x + width//2 - 1):
		for y_iter in range(y - height//2, y + height//2 - 1):
			tiles_to_request.append(url_prefix + '%d/%d/%d.png' % (z, x_iter, y_iter))

	pool = multiprocessing.Pool(processes=tile_factor)
	start_time = time.time()
	results = pool.map(slippy_test_helper, tiles_to_request)
	end_time = time.time()
	pool.close()
	pool.join()
	sys.stderr.write('.')

	if(False in results):
		return '%d,ERROR,%f' % (-1, float('nan'))
	return '%d,OK,' % z + str(end_time - start_time)
Author: blc56, Project: PlanetWoo, Lines: 26, Source: map_flogger.py

Example 10: process_iteration

def process_iteration(Ns, ps, landscape, config):
	output_dir = config.output_dir + config.ext
	
	if config.background_image is not None:
		background_path = config.input_dir + "/" + config.background_image
	else:
		background_path = None
	
	#Create a point to hold the iteration
	p = Point()
	p.add_iteration()
	
	#draw_population(Ns[0], landscape, ps.totalK, 0, output_dir, 2.0, background_path)
	
	if config.display:
		pool = mp.Pool(config.num_processors)

	for t in xrange(min(ps.max_time_steps, len(Ns))):
		if config.display:
			pool.apply_async(draw_population, [Ns[t], landscape, ps.totalK, t, output_dir, 2.0, background_path])
		
		p.add_time_step([t] + population_statistics(ps, landscape, Ns[t]))
	
	if config.display:
		pool.close()

	#Write the iteration results to file as a trajectory containing a single point
	write_trajectories([Trajectory(points=[p])], None, ps.sentinels, output_dir + "/results.txt")

	if config.save_time_steps:
		np.savez(output_dir + "/populations.npz", *Ns)

	if config.display:
		pool.join()
Author: saamrenton, Project: GMBI, Lines: 32, Source: gmbiIO.py

Example 11: _yield_koji_relationships_from_build

    def _yield_koji_relationships_from_build(self, koji_url, build_id, rpms=None):

        # Get all RPMs for a build... only if they're not supplied.
        if not rpms:
            build, rpms = pdcupdater.services.koji_rpms_from_build(
                koji_url, build_id)

        results = collections.defaultdict(set)

        def _get_requirements(filename):
            log.debug("Looking up installtime deps in koji for %r" % filename)
            return filename, pdcupdater.services.koji_yield_rpm_requires(
                self.koji_url, filename)

        # Look up the *install time* deps, in parallel.  Lots of I/O wait..
        pool = multiprocessing.pool.ThreadPool(self.io_threads)
        requirements = pool.map(_get_requirements, rpms)
        pool.close()

        for filename, requires in requirements:
            parent = filename.rsplit('-', 2)[0]

            for name, qualifier, version in requires:
                # XXX - we're dropping any >= or <= information here, which is
                # OK for now.  All we need to know is that there is a
                # dependency.
                results[parent].add(('RPMRequires', name,))

        for parent in results:
            for relationship_type, child in results[parent]:
                yield parent, relationship_type, child
Author: fedora-infra, Project: pdc-updater, Lines: 32, Source: rpms.py

Example 12: _map_parallel

def _map_parallel(function, args, n_jobs):
    """multiprocessing.Pool(processors=n_jobs).map with some error checking"""
    # Following the error checking found in joblib
    multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
    if multiprocessing:
        try:
            import multiprocessing
            import multiprocessing.pool
        except ImportError:
            multiprocessing = None
        if sys.platform.startswith("win") and PY2:
            msg = "Multiprocessing is not supported on Windows with Python 2.X. Setting n_jobs=1"
            logger.warning(msg)
            n_jobs = 1
    # 2nd stage: validate that locking is available on the system and
    #            issue a warning if not
    if multiprocessing:
        try:
            _sem = multiprocessing.Semaphore()
            del _sem  # cleanup
        except (ImportError, OSError) as e:
            multiprocessing = None
            logger.warning('{}. _map_parallel will operate in serial mode'.format(e))
    if multiprocessing and int(n_jobs) not in (0, 1):
        if n_jobs == -1:
            n_jobs = None
        pool = multiprocessing.Pool(processes=n_jobs)
        try:
            map_result = pool.map(function, args)
        finally:
            pool.close()
            pool.join()
    else:
        map_result = list(map(function, args))
    return map_result
Author: stan-dev, Project: pystan, Lines: 35, Source: model.py

Example 13: run_abstraction_parallel

    def run_abstraction_parallel(self):
        # initialization
        self.__get_methods()
        self.__read_config()
        self.__get_dataset()

        # get filename and properties
        filename_properties = []
        for filename, properties in self.files.iteritems():
            filename_properties.append((filename, properties))

        # run experiment in multiprocessing mode
        total_cpu = multiprocessing.cpu_count()
        pool = NoDaemonProcessPool(processes=total_cpu)
        results = pool.map(self, filename_properties)
        pool.close()
        pool.join()

        # open evaluation file
        self.__check_path(self.files['evaluation_directory'])
        f = open(self.files['evaluation_file'], 'wt')
        writer = csv.writer(f)

        # set header for evaluation file
        header = []
        if self.configuration['main']['abstraction'] == '1':
            header = self.configuration['abstraction_evaluation']['evaluation_file_header'].split('\n')
        writer.writerow(tuple(header))

        # write experiment result
        for result in results:
            writer.writerow(result)

        # close evaluation file
        f.close()
Author: studiawan, Project: pygraphc, Lines: 35, Source: AbstractionExperiment.py

Example 14: _listArtifacts

    def _listArtifacts(self, urls, gavs):
        """
        Loads maven artifacts from list of GAVs and tries to locate the artifacts in one of the
        specified repositories.

        :param urls: repository URLs where the given GAVs can be located
        :param gavs: List of GAVs
        :returns: Dictionary where index is MavenArtifact object and value is it's repo root URL.
        """
        def findArtifact(gav, urls, artifacts):
            artifact = MavenArtifact.createFromGAV(gav)
            for url in urls:
                if maven_repo_util.gavExists(url, artifact):
                    #Critical section?
                    artifacts[artifact] = ArtifactSpec(url, [ArtifactType(artifact.artifactType, True, set(['']))])
                    return

            logging.warning('Artifact %s not found in any url!', artifact)

        artifacts = {}
        pool = ThreadPool(maven_repo_util.MAX_THREADS)
        for gav in gavs:
            pool.apply_async(findArtifact, [gav, urls, artifacts])

        # Close the pool and wait for the workers to finish
        pool.close()
        pool.join()

        return artifacts
Author: jboss-eap, Project: maven-repository-builder, Lines: 29, Source: artifact_list_builder.py

Example 15: refine

def refine(L, X, D, e, a, b, k, num_workers, metric):
    """ Throw out bad points (algorithm 7, lines 7-17)
    :param L: List of subsets
    :param X: Data matrix
    :param D: dictionary
    :param e: lower bound on fractional size of each cluster
    :param a: lower bound on fractional size of a set inside own cluster for which stability holds
    :param b: lower bound on fractional size of a set outside own cluster for which stability holds
    :param k: Number of clusters
    :param num_workers: Number of workers
    :param metric: metric is in {avg, max, min}
    :return: Refined clusters
    """
    print("Getting rid of bad points")
    print("Length of L at start = ", len(L))
    start = time.time()
    n = len(X)
    T = int((e - 2 * a - b * k) * n)
    t = int((e - a) * n)
    with Pool() as pool:
        func = partial(refine_individual, D, T, t)
        L = pool.map(func, L)
        pool.close()
        pool.join()
    end = time.time()
    print("Length of L on end = ", len(L))
    print("time = {0:.2f}s".format(end - start))
    return grow(L, X, a, num_workers, metric)
Author: ionux, Project: clustering, Lines: 28, Source: cluster.py
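One detail worth noting in this last example: Pool's context manager calls terminate() on exit, not close(), so the explicit close() and join() inside the with block are what actually wait for outstanding tasks to finish before the pool is torn down. A minimal sketch of the same pattern (the worker function is illustrative):

from multiprocessing import Pool

def work(x):
    return x + 1  # illustrative

if __name__ == '__main__':
    with Pool() as pool:
        out = pool.map(work, range(4))
        pool.close()  # stop accepting new tasks
        pool.join()   # wait for workers; __exit__ then terminates the pool
    print(out)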


Note: The multiprocessing.pool.close function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.