

Python multiprocessing.ProcessingPool Method Code Examples

This article collects typical usage examples of the pathos.multiprocessing.ProcessingPool method in Python. If you are wondering what ProcessingPool does, how to call it, or what idiomatic uses look like, the curated examples below should help. You can also explore the other usage examples in the pathos.multiprocessing module.


The following presents 8 code examples of multiprocessing.ProcessingPool, sorted by popularity.
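Before diving into the project examples, here is a minimal, self-contained sketch of the method in isolation (the worker function and pool size are illustrative, not taken from any project below):

from pathos.multiprocessing import ProcessingPool

def work(x):
    return x * x

pool = ProcessingPool(4)          # pool of four worker processes
print(pool.map(work, range(10)))  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]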

Example 1: __init__

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def __init__(self, n_features, n_nodes, embedding_dim, rnn_dim, bidirectional=True,
            sinkhorn_iters=5, sinkhorn_tau=1, num_workers=4, cuda=True):
        super(SPGSequentialActor, self).__init__()
        self.use_cuda = cuda
        self.n_nodes = n_nodes
        self.embedding_dim = embedding_dim
        self.rnn_dim = rnn_dim
        self.num_workers = num_workers
        self.embedding = nn.Linear(n_features, embedding_dim)
        self.gru = nn.GRU(embedding_dim, rnn_dim, bidirectional=bidirectional)
        scale = 2 if bidirectional else 1
        self.fc2 = nn.Linear(scale * self.rnn_dim, n_nodes)
        self.sinkhorn = Sinkhorn(n_nodes, sinkhorn_iters, sinkhorn_tau)
        self.round = linear_assignment
        init_hx = torch.zeros(scale, self.rnn_dim)
        if cuda:
            init_hx = init_hx.cuda()
        self.init_hx = Variable(init_hx, requires_grad=False)
        if num_workers > 0:
            # pathos ProcessingPool (imported as Pool) for parallelizing the rounding step
            self.pool = Pool(num_workers)
Author: pemami4911, Project: sinkhorn-policy-gradient.pytorch, Lines: 22, Source: models.py
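The pool in this constructor parallelizes the linear_assignment rounding step across a batch. As a hedged sketch of that pattern (not the author's exact forward pass; scipy.optimize.linear_sum_assignment stands in for the linear_assignment helper used above):

import numpy as np
from scipy.optimize import linear_sum_assignment
from pathos.multiprocessing import ProcessingPool as Pool

def round_to_permutation(matrix):
    # Solve an assignment problem to round a soft matrix to a hard permutation
    rows, cols = linear_sum_assignment(-matrix)  # negate to maximize total weight
    perm = np.zeros_like(matrix)
    perm[rows, cols] = 1.0
    return perm

batch = [np.random.rand(5, 5) for _ in range(8)]
pool = Pool(4)
permutations = pool.map(round_to_permutation, batch)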

Example 2: __init__

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def __init__(self, process):
        self.size = process_count or mp.cpu_count()  # process_count is a module-level setting in the source file
        self.process = process
        self.phase = None
        self.pool = mp.ProcessingPool(self.size)
Author: thouska, Project: spotpy, Lines: 7, Source: mproc.py

Example 3: __init__

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def __init__(self, process):
        self.size = process_count or mp.cpu_count()
        self.process = process
        self.phase = None
        self.pool = mp.ProcessingPool(self.size) 
Author: thouska, Project: spotpy, Lines: 7, Source: umproc.py
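Both spotpy variants simply fan work out over the pool they construct. A minimal sketch of the call pattern (the job list and process function here are made up; as noted above, process_count is a module-level override in the spotpy source):

from pathos import multiprocessing as mp

process_count = None  # module-level override, as in the spotpy source

def process(job):
    return job ** 2  # stand-in for spotpy's actual simulation call

pool = mp.ProcessingPool(process_count or mp.cpu_count())
print(pool.map(process, range(10)))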

Example 4: map

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def map(self, f, seq):
        """
        Parallel implementation of map.

        Parameters
        ----------
        f : callable
            A function to map to all the values in 'seq'

        seq : iterable
            An iterable of values to process with 'f'

        Returns
        -------
        results : list, shape=[len(seq)]
            The evaluated values
        """
        if self.n_jobs < 1:
            n_jobs = multiprocessing.cpu_count()
        elif self.n_jobs == 1:
            return list(map(f, seq))
        else:
            n_jobs = self.n_jobs

        pool = Pool(n_jobs)
        results = list(pool.map(f, seq))
        # Closing/joining is not really allowed because pathos sees pools as
        # lasting for the duration of the program.
        return results 
Author: crcollins, Project: molml, Lines: 31, Source: base.py
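The comment above about closing and joining deserves a note: pathos caches pool objects by configuration, so constructing Pool(n) repeatedly reuses one underlying pool rather than leaking processes. If you do want to tear a pool down explicitly, the lifecycle calls look like this (a sketch against current pathos releases):

from pathos.multiprocessing import ProcessingPool as Pool

pool = Pool(4)
print(pool.map(abs, [-1, -2, -3]))
pool.close()  # stop accepting new work
pool.join()   # wait for the workers to finish
pool.clear()  # drop the cached pool so the next Pool(4) builds a fresh one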

Example 5: parallelExecute

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def parallelExecute(jobs):
    # `arguments` comes from the source module's command-line parsing
    if arguments.cores == 1:
        return [invokeExecuteMethod(j, timeout=arguments.timeout) for j in jobs]
    else:
        return Pool(arguments.cores).map(lambda j: invokeExecuteMethod(j, timeout=arguments.timeout), jobs)


Author: ellisk42, Project: TikZ, Lines: 11, Source: synthesizer.py
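Note that this example maps a lambda over the jobs, which is exactly why pathos is used instead of the standard library: stdlib multiprocessing serializes tasks with pickle, which rejects lambdas and closures, while pathos serializes them with dill. A minimal demonstration (the worker is a made-up stand-in for invokeExecuteMethod):

from pathos.multiprocessing import ProcessingPool as Pool

def run(job):
    return job * 2  # stand-in for invokeExecuteMethod

pool = Pool(4)
print(pool.map(lambda j: run(j), range(8)))
# The equivalent multiprocessing.Pool(4).map(lambda j: run(j), range(8))
# would raise a PicklingError, since pickle cannot serialize lambdas.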

Example 6: parallelMap

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def parallelMap(numberOfCPUs, f, *xs):
    import traceback
    from pathos.multiprocessing import ProcessingPool as Pool

    xs = list(zip(*xs))  # materialize once; zip is a one-shot iterator in Python 3
    numberOfCPUs = min(numberOfCPUs, len(xs))

    if numberOfCPUs == 1:
        return [f(*x) for x in xs]

    def safeCall(x):
        try:
            return f(*x)
        except Exception:
            # Print the traceback here, since pool workers otherwise swallow it
            print("Exception in worker during parallel map:\n%s" % traceback.format_exc())
            raise
    return Pool(numberOfCPUs).map(safeCall, xs)
Author: ellisk42, Project: TikZ, Lines: 16, Source: utilities.py

Example 7: __init__

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def __init__(self, objective_function=None, dimensions=None, **kwargs):
        # No dimensions argument passed
        if dimensions is None:
            try:
                # Use the objective function's number of arguments as dimensions
                dimensions = objective_function.__code__.co_argcount
            except TypeError:
                raise TypeError("Invalid function passed.")

        # Construct PopulationParameters object
        self.parameters = PopulationParameters(dimensions=dimensions, **kwargs)

        self.objective_function = objective_function
        self.elite_population_size = int(self.parameters.elite_fraction * self.parameters.population_size)
        self.evaluated_fitness_ranks = False
        self.evaluated_diversity_ranks = False
        self.mean_fitness = 0
        self.mean_diversity = 0
        self.mean_coordinates = np.zeros((self.parameters.num_dimensions, 1))
        self.num_iterations = 1

        # Multiprocessing defaults
        self.multiprocessing = kwargs.get('multiprocessing', False)
        self.processes = kwargs.get('processes')

        # Create points as Point objects
        self.points = []
        for pointnumber in range(self.parameters.population_size):
            point = Point(associated_population=self, dimensions=self.parameters.num_dimensions)
            self.points.append(point)
            self.points[pointnumber].index = pointnumber

        # If multiprocessing is enabled, create pool of processes.
        if self.multiprocessing:
            if self.processes is None:
                self.pool = mp.ProcessingPool()
            else:
                self.pool = mp.ProcessingPool(ncpus=self.processes)

            fitnesses = self.pool.map(lambda coordinates, func: func(*coordinates),
                                      [point.coordinates for point in self.points],
                                      [self.objective_function] * len(self.points))

            # Assign fitnesses to each point
            for index, point in enumerate(self.points):
                point.fitness = fitnesses[index]
        else:
            for point in self.points:
                point.evaluate_fitness(self.objective_function)

        # Evaluate fitness and diversity ranks
        self.__evaluate_fitness_ranks()
        self.__evaluate_diversity_ranks()

    # Evaluate the fitness rank of each point in the population 
Author: ameya98, Project: GeneticAlgorithmsRepo, Lines: 55, Source: function_maximize.py
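Unlike the standard library's Pool.map, the pathos map accepts multiple iterables and zips them, which is what the two-argument lambda above relies on. A self-contained sketch with a toy objective (the names are illustrative):

from pathos.multiprocessing import ProcessingPool as Pool

def objective(x, y):
    return -(x ** 2 + y ** 2)  # toy fitness, maximal at the origin

coords = [(0.0, 0.0), (1.0, 2.0), (3.0, -1.0)]
pool = Pool(ncpus=2)
print(pool.map(lambda c, f: f(*c), coords, [objective] * len(coords)))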

Example 8: climByAveragingPeriods

# Required imports: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessingPool [as alias]
def climByAveragingPeriods(urls,              # list of (daily) granule URLs for a long time period (e.g. a year)
                    nEpochs,                  # compute a climatology for every N epochs (days) by 'averaging'
                    nWindow,                  # number of epochs in window needed for averaging
                    variable,                 # name of primary variable in file
                    mask,                     # name of mask variable
                    coordinates,              # names of coordinate arrays to read and pass on (e.g. 'lat' and 'lon')
                    maskFn=qcMask,            # mask function to compute mask from mask variable
                    averager='pixelAverage',  # averaging function to use, one of ['pixelAverage', 'gaussInterp']
                    mode='sequential',        # Map across time periods of N-days for concurrent work, executed by:
                                              # 'sequential' map, 'multicore' using pool.map(), 'cluster' using pathos pool.map(),
                                              # or 'spark' using PySpark
                    numNodes=1,               # number of cluster nodes to use
                    nWorkers=4,               # number of parallel workers per node
                    averagingFunctions=AveragingFunctions,    # dict of possible averaging functions
                    legalModes=ExecutionModes  # list of possible execution modes
                   ):
    '''Compute a climatology every N days by applying a mask and averaging function.
Writes the averaged variable grid, attributes of the primary variable, and the coordinate arrays in a dictionary.
***Assumption:  This routine assumes that the N grids will fit in memory.***
    '''
    try:
        averageFn = averagingFunctions[averager]
    except KeyError:
        averageFn = average
        print('climatology: Error, averaging function must be one of: %s' % str(averagingFunctions), file=sys.stderr)

    urlSplits = [s for s in fixedSplit(urls, nEpochs)]
    if VERBOSE: print(urlSplits, file=sys.stderr)

    def climsContoured(urls):
        n = len(urls)
        var = climByAveraging(urls, variable, mask, coordinates, maskFn, averageFn)
        return contourMap(var, variable, coordinates, n, urls[0])

    if mode == 'sequential':
        plots = list(map(climsContoured, urlSplits))
    elif mode == 'multicore':
        pool = Pool(nWorkers)
        plots = pool.map(climsContoured, urlSplits)
    elif mode == 'cluster':
        plots = list(map(climsContoured, urlSplits))  # cluster mode not implemented yet; fall back to sequential
    elif mode == 'spark':
        plots = list(map(climsContoured, urlSplits))  # spark mode not implemented yet; fall back to sequential

    print(plots)
    return plots
#    return makeMovie(plots, 'clim.mpg')
Author: apache, Project: incubator-sdap-nexus, Lines: 50, Source: climatology.py
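The 'cluster' branch above is left unimplemented; the docstring suggests it would use a pathos pool across nodes. As a hedged sketch, pathos ships a ppft-backed ParallelPool that exposes the same map interface (the worker count here is illustrative, and distributing work to remote servers requires additional setup):

from pathos.pools import ParallelPool

def square(n):
    return n * n

pool = ParallelPool(nodes=4)  # ppft-backed pool; same map() interface
print(pool.map(square, range(8)))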


Note: The pathos.multiprocessing.ProcessingPool examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors, and any distribution or use should comply with the corresponding project's License. Do not republish without permission.