

Python pathos.multiprocessing.ProcessingPool Class Code Examples

This article collects typical usage examples of the ProcessingPool class from the Python package pathos.multiprocessing. If you are wondering what the ProcessingPool class does, how to use it, or what it looks like in real code, the curated class examples below should help.


The following shows 15 code examples of the ProcessingPool class, sorted by popularity by default.
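All 15 examples share the same basic pattern: build a pool backed by worker processes, then hand it a function plus one iterable per function argument. As a minimal, self-contained sketch before diving in (the function and pool size here are illustrative, not taken from any example below):

    from pathos.multiprocessing import ProcessingPool

    def square(x):
        # pathos serializes callables with dill, so closures and lambdas
        # also work where the stdlib multiprocessing.Pool would fail
        return x * x

    pool = ProcessingPool(nodes=4)          # four worker processes
    print(pool.map(square, range(10)))      # [0, 1, 4, 9, ..., 81]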

Example 1: _featurize_complexes

  def _featurize_complexes(self, df, featurizer, parallel=True,
                           worker_pool=None):
    """Generates circular fingerprints for dataset."""
    protein_pdbs = list(df["protein_pdb"])
    ligand_pdbs = list(df["ligand_pdb"])

    def featurize_wrapper(ligand_protein_pdb_tuple):
      ligand_pdb, protein_pdb = ligand_protein_pdb_tuple
      print("Featurizing %s" % ligand_pdb[0:2])
      molecule_features = featurizer.featurize_complexes([ligand_pdb],
                                                         [protein_pdb])
      return molecule_features

    if worker_pool is None:
      # Serial path: featurize one complex at a time.
      features = []
      for ligand_protein_pdb_tuple in zip(ligand_pdbs, protein_pdbs):
        features.append(featurize_wrapper(ligand_protein_pdb_tuple))
    else:
      # Caller-supplied pool, e.g. ProcessingPool(mp.cpu_count()),
      # farms the complexes out to worker processes.
      features = worker_pool.map(featurize_wrapper,
                                 zip(ligand_pdbs, protein_pdbs))
    df[featurizer.__class__.__name__] = list(features)
Developer: arose, Project: deepchem, Source: featurize.py

Example 2: compute_importance

    def compute_importance(self, alpha):
        """Compute each tree's importance errors in parallel and
        return their mean over the forest."""
        pool = ProcessingPool(self._numJobs)
        errors = pool.map(self._computeImportanceOfTree,
                          [alpha] * self._numTree, range(self._numTree))
        return np.array(errors).mean(axis=0)
Developer: NazBen, Project: pyquantregForest, Source: pyquantregForest.py

Example 3: run_all_control_analysis

    def run_all_control_analysis(self):
        dirs = dir_walker(self.encode_root)
        control_dir = None
        for d in dirs:
            if 'control' in d.lower():
                control_dir = d
        assert control_dir is not None
        replicates = dir_walker(control_dir, level=1)
        pool = ProcessingPool(nodes=14)
        pool.map(self.control_analysis, tuple(replicates))
        return replicates
Developer: saketkc, Project: moca_web, Source: encode_processor.py

Example 4: register_stack_to_template

def register_stack_to_template(frames, template, regfn, njobs=4, **fnargs):
    """
    Given a stack of frames (or an FSeq object) and a template image,
    align every frame to the template and return a list of functions,
    each of which takes an image and returns that image warped into
    alignment with the template.
    """
    if njobs > 1:
        pool = ProcessingPool(nodes=njobs)
        out = pool.map(partial(regfn, template=template, **fnargs), frames)
    else:
        # Keep the return type consistent with the parallel branch.
        out = [regfn(img, template, **fnargs) for img in frames]
    return out
Developer: chrinide, Project: image-funcut, Source: opflowreg.py

Example 5: main

def main():
    from hyperopt import fmin, tpe, hp, Trials
    from hyperopt.mongoexp import MongoTrials
    import os

    fit_params = eval(open('fit_parameters.txt').read())
    fit_params['root'] = os.getcwd()
    directory = init_directory(fit_params)
    if fit_params['optimization'] == 'hyperopt':
        space = search_space(fit_params)
        trials = Trials()
        best = fmin(run, space=space, algo=tpe.suggest,
                    max_evals=fit_params['max_evals'], trials=trials)
        plot_results(trials.trials)

    # https://github.com/hyperopt/hyperopt/wiki/Parallelizing-Evaluations-During-Search-via-MongoDB
    ''' commands for MongoDB
    mongod --dbpath . --port 1234
    export PYTHONPATH=$PYTHONPATH:/home/pduggins/influence_susceptibility_conformity
    hyperopt-mongo-worker --mongo=localhost:1234/foo_db --poll-interval=0.1
    '''
    if fit_params['optimization'] == 'mongodb':
        space = search_space(fit_params)
        space['directory'] = directory
        trials = MongoTrials('mongo://localhost:1234/foo_db/jobs', exp_key='exp4')
        best = fmin(run, space=space, algo=tpe.suggest,
                    max_evals=fit_params['max_evals'], trials=trials)
        plot_results(trials.trials)

    if fit_params['optimization'] == 'evolve':
        from pathos.multiprocessing import ProcessingPool as Pool
        from pathos.helpers import freeze_support  # for Windows
        import numpy as np
        import pandas as pd
        # freeze_support()
        evo_pop = init_evo_pop(fit_params)
        pool = Pool(nodes=fit_params['threads'])

        for g in range(fit_params['generations']):
            exp_params = [value['P'] for value in evo_pop.values()]
            # Evaluate the whole generation in parallel, one job per genome.
            fitness_list = pool.map(run, exp_params)
            # new_gen_list = tournament_selection(fitness_list, fit_params)
            new_gen_list = rank_proportional_selection(fitness_list)
            remade_pop = remake(evo_pop, new_gen_list)
            mutated_pop = mutate(remade_pop, evo_pop, fit_params)
            evo_pop = mutated_pop
            # crossed_pop = crossover(mutated_pop)
            # evo_pop = crossed_pop
            mean_F = np.average([evo_pop[ind]['F'] for ind in evo_pop])
            std_F = np.std([evo_pop[ind]['F'] for ind in evo_pop])
            print('\nGeneration %s: mean_F=%s, std_F=%s' % (g + 1, mean_F, std_F))

        out_pop = pd.DataFrame([evo_pop])
        out_pop.reset_index().to_json('evo_pop.json', orient='records')
Developer: psipeter, Project: influence_susceptibility_conformity, Source: fit_empirical.py

Example 6: apply_warps

def apply_warps(warps, frames, njobs=4):
    """
    Return the result of applying the given warps to the given frames
    (one warp per frame).
    """
    if njobs > 1:
        pool = ProcessingPool(nodes=njobs)
        out = np.array(pool.map(parametric_warp, frames, warps))
    else:
        out = np.array([parametric_warp(f, w) for f, w in zip(frames, warps)])
    if isinstance(frames, fseq.FrameSequence):
        out = fseq.open_seq(out)
        out.meta = frames.meta
    return out
Developer: chrinide, Project: image-funcut, Source: opflowreg.py
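Examples 4 and 6 come from the same module (opflowreg.py) and are meant to compose: register a stack against a template first, then apply the resulting warps. A hedged usage sketch, assuming a registration function regfn as described in Example 4:

    warps = register_stack_to_template(frames, template, regfn, njobs=4)
    aligned = apply_warps(warps, frames, njobs=4)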

Example 7: get_signal_bg_many_parallel

def get_signal_bg_many_parallel(runList, detid, **kwargs):
    """
    Return the averaged signal and background (based on blank frames)
    over the given runs.
    """
    def mapfunc(run_number):
        return get_signal_bg_one_run(run_number, detid, **kwargs)

    MAXNODES = 14
    pool = ProcessingPool(nodes=min(MAXNODES, len(runList)))
    bg = np.zeros(DIMENSIONS_DICT[detid])
    signal = np.zeros(DIMENSIONS_DICT[detid])
    # pathos pickles mapfunc with dill, so mapping a closure that captures
    # detid and kwargs works here where stdlib multiprocessing would fail.
    run_data = pool.map(mapfunc, runList)
    for signal_increment, bg_increment in run_data:
        signal += (signal_increment / len(runList))
        bg += (bg_increment / len(runList))
    return signal, bg
Developer: ggggggggg, Project: LCLS, Source: avg_bgsubtract_hdf.py

Example 8: run

def run(non_iter_args, do_multiprocessing):
    [
        weightcalcdata,
        weightcalculator,
        box,
        startindex,
        size,
        newconnectionmatrix,
        method,
        boxindex,
        filename,
        headerline,
        writeoutput,
    ] = non_iter_args

    partial_gaincalc_oneset = partial(
        calc_weights_oneset,
        weightcalcdata,
        weightcalculator,
        box,
        startindex,
        size,
        newconnectionmatrix,
        method,
        boxindex,
        filename,
        headerline,
        writeoutput,
    )

    if do_multiprocessing:
        pool = Pool(processes=pathos.multiprocessing.cpu_count())
        pool.map(partial_gaincalc_oneset, weightcalcdata.causevarindexes)

        # Current solution to no close and join methods on ProcessingPool
        # https://github.com/uqfoundation/pathos/issues/46

        s = pathos.multiprocessing.__STATE["pool"]
        s.close()
        s.join()
        pathos.multiprocessing.__STATE["pool"] = None

    else:
        for causevarindex in weightcalcdata.causevarindexes:
            partial_gaincalc_oneset(causevarindex)

    return None
Developer: SimonStreicher, Project: FaultMap, Source: gaincalc_oneset.py
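The __STATE manipulation above was a workaround from when ProcessingPool exposed no public shutdown methods (see the linked pathos issue #46). Current pathos releases provide close(), join(), and clear() on the pool itself, so the same cleanup can be written as the following sketch (the submitted workload reuses names from the example):

    from pathos.multiprocessing import ProcessingPool

    pool = ProcessingPool(nodes=4)
    pool.map(partial_gaincalc_oneset, weightcalcdata.causevarindexes)
    pool.close()   # stop accepting new work
    pool.join()    # wait for the workers to finish
    pool.clear()   # drop the cached pool so a fresh one can be created later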

Example 9: _calculate_s_powder_over_atoms_core

    def _calculate_s_powder_over_atoms_core(self, q_indx=None):
        """
        Helper function for _calculate_s_powder_1d.
        :returns: Python dictionary with S data
        """
        atoms_items = {}
        atoms = range(self._num_atoms)
        self._prepare_data(k_point=q_indx)

        if PATHOS_FOUND:
            p_local = ProcessingPool(nodes=AbinsModules.AbinsParameters.threads)
            result = p_local.map(self._calculate_s_powder_one_atom, atoms)
        else:
            # Serial fallback when pathos is not installed.
            result = [self._calculate_s_powder_one_atom(atom=atom) for atom in atoms]

        for atom in range(self._num_atoms):
            atoms_items["atom_%s" % atom] = {"s": result[atom]}
            self._report_progress(msg="S for atom %s has been calculated." % atom)
        return atoms_items
Developer: DanNixon, Project: mantid, Source: SPowderSemiEmpiricalCalculator.py

Example 10: _exec_sample

    def _exec_sample(X):
        from pathos.multiprocessing import ProcessingPool
        try:
            p = ProcessingPool(n_cpus)
            X = np.array(X)
            x = np.array_split(X, n_cpus)
            # Submit one asynchronous job per chunk...
            pipe = []
            for i in range(n_cpus):
                pipe.append(p.apipe(func, x[i]))

            # ...then collect the chunk results in order.
            rs = []
            for i in range(n_cpus):
                rs.append(pipe[i].get())

            rs = [item for sublist in rs for item in sublist]

            return ot.NumericalSample(rs)
        except ValueError:
            # Reached when the chunk split leaves single evaluations over.
            return func(X)
Developer: felipeam86, Project: otwrapy, Source: _otwrapy.py
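The per-chunk apipe calls above can also be collapsed into a single amap call, which submits the whole list of chunks at once and returns one handle to block on. A minimal sketch under the same names (func, n_cpus, X) as the example:

    p = ProcessingPool(n_cpus)
    chunks = np.array_split(np.array(X), n_cpus)
    rs = p.amap(func, chunks).get()              # submit all chunks, then block
    rs = [item for sublist in rs for item in sublist]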

Example 11: transpose_index

    def transpose_index(self):  # WORKS ONLY FOR TEST DATA
        """Transpose the data according to the index."""

        data = self.data
        indexes = list(set(data.index))

        names, datasets = [], []
        for name in indexes:
            names.append(name)
            datasets.append(data[[name in i for i in data.index]])

        # Map over all (name, dataset) pairs at once, rather than calling
        # pool.map once per single-element pair.
        pool = ProcessingPool()
        plots = pool.map(self.create_transposed_plot, names, datasets)

        logging.debug('Index transposed')

        return plots
Developer: Vifespoir, Project: mLearning, Source: dataPlot.py

Example 12: _featurize_compounds

  def _featurize_compounds(self, df, featurizer, parallel=True,
                           worker_pool=None):
    """Featurize individual compounds.

       Given a featurizer that operates on individual chemical compounds
       or macromolecules, compute and add features for each compound to
       the features dataframe.
    """
    sample_smiles = df["smiles"].tolist()

    if worker_pool is None:
      # Serial path: featurize one compound at a time.
      features = []
      for ind, smiles in enumerate(sample_smiles):
        if ind % self.log_every_n == 0:
          log("Featurizing sample %d" % ind, self.verbose)
        mol = Chem.MolFromSmiles(smiles)
        features.append(featurizer.featurize([mol]))
    else:
      def featurize_wrapper(smiles, dilled_featurizer):
        print("Featurizing %s" % smiles)
        mol = Chem.MolFromSmiles(smiles)
        featurizer = dill.loads(dilled_featurizer)
        feature = featurizer.featurize([mol])
        return feature

      # Serialize the featurizer with dill so it can cross the process
      # boundary, then bind it into the wrapper before mapping.
      dilled_featurizer = dill.dumps(featurizer)
      featurize_wrapper_partial = partial(featurize_wrapper,
                                          dilled_featurizer=dilled_featurizer)
      features = worker_pool.map(featurize_wrapper_partial, sample_smiles)

    df[featurizer.__class__.__name__] = features
Developer: arose, Project: deepchem, Source: featurize.py

Example 13: parallelmap

def parallelmap(func, data, nodes=None):
    """
    Evaluate func over data in parallel, using one worker process per node.
    """
    if not nodes:
        nodes = multiprocessing.cpu_count() - 2
    pool = ProcessingPool(nodes=nodes)
    try:
        return pool.map(func, data)
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
Developer: hoidn, Project: packages, Source: utils.py

Example 14: image_division

    def image_division(self):
        image_rows, image_cols = self.__image.shape[:2]
        print(self.__image.shape[:2])
        grid_indices = [
            np.array([x, y])
            for x in range(0, image_cols - self.__GRID_SIZE, self.__GRID_SIZE)
            for y in range(0, image_rows - self.__GRID_SIZE, self.__GRID_SIZE)
        ]
        pool = Pool()
        output = pool.map(self.grid_division, grid_indices)
        threshold_success_sample = 6
        ransacGrouper = RansacLine(1, threshold_success_sample, 25, 2)
        for index, edgels in enumerate(output):
            if len(edgels) > threshold_success_sample:
                ransacGrouper.edgels = edgels
                ransac_groups = ransacGrouper.applay_parallel_ransac()
                self.line_segment(ransac_groups)

        # print(len(self.__lines))
        # for line in self.__lines:
        #     print(line.slope, line.intercept)
        #     coefficients = np.array([line.slope, line.intercept])
        #     x = np.array([20, 50], dtype=np.int32)
        #     polynomial = np.poly1d(coefficients)
        #     y = polynomial(x)
        #     y = [int(e) for e in y]
        #     print("x: ", x, "y: ", y)
        #     cv2.line(self.__image, (x[0], y[0]), (x[1], y[1]), (0, 255, 0), 1)

        cv2.imshow("image", self.__image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Developer: Amin-Abouee, Project: PyMarkerDetection, Source: marker_detection.py

Example 15: map

    def map(self, f, seq):
        """
        Parallel implementation of map.

        Parameters
        ----------
        f : callable
            A function to map to all the values in 'seq'

        seq : iterable
            An iterable of values to process with 'f'

        Returns
        -------
        results : list, shape=[len(seq)]
            The evaluated values
        """
        if self.n_jobs < 1:
            n_jobs = multiprocessing.cpu_count()
        elif self.n_jobs == 1:
            return list(map(f, seq))
        else:
            n_jobs = self.n_jobs

        pool = Pool(n_jobs)
        results = list(pool.map(f, seq))
        # Closing/joining is not really allowed because pathos sees pools as
        # lasting for the duration of the program.
        return results
Developer: crcollins, Project: molml, Source: base.py


Note: The pathos.multiprocessing.ProcessingPool class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use should follow each project's license. Do not republish without permission.