

Python multiprocessing.ProcessPool Code Examples

This article collects typical usage examples of pathos.multiprocessing.ProcessPool in Python. If you are trying to work out how to use multiprocessing.ProcessPool, what to call it with, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples from the containing module, pathos.multiprocessing.


Six code examples of multiprocessing.ProcessPool are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
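
Before the numbered examples, here is a minimal, self-contained sketch of the basic ProcessPool pattern. The worker function, the pool size of 4, and the input range are illustrative choices, not taken from any of the projects below.

from pathos.multiprocessing import ProcessPool

def square(x):
    return x ** 2

if __name__ == '__main__':
    pool = ProcessPool(nodes=4)              # spawn 4 worker processes
    results = pool.map(square, range(10))    # blocking, ordered map
    print(results)                           # [0, 1, 4, 9, ..., 81]
    pool.close()
    pool.join()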

Example 1: __init__

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def __init__(self, n_cpu=-1):
        """Process pool for applying functions multi-threaded with progress bars.

        Arguments:
        n_cpu -- The number of processes to spawn. Defaults to the number of threads (logical cores) on your system.

        Usage:
        >>> pool = mlc.SuperPool()  # By default, the cpu count is used
        >>> def f(x):
        ...     return x ** 2
        >>> res = pool.map(f, range(1000))  # Apply function f to every value in range(1000)
        [mlcrate] 8 CPUs: 100%|████████████████████████| 1000/1000 [00:00<00:00, 1183.78it/s]
        """
        from multiprocessing import cpu_count
        from pathos.multiprocessing import ProcessPool
        import tqdm

        self.tqdm = tqdm

        if n_cpu == -1:
            n_cpu = cpu_count()

        self.n_cpu = n_cpu
        self.pool = ProcessPool(n_cpu) 
Author: mxbi, Project: mlcrate, Lines: 26, Source: __init__.py

Example 2: parse_many

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def parse_many(self, jds, validate_tweet=True, multiprocesses=False):
        """Parse many tweets either in a sequencial or in parallel way.
        """
        if multiprocesses is None or multiprocesses is False\
                or multiprocesses == 0:
            results = [
                self.parse_one(jd, validate_tweet=validate_tweet) for jd in jds
            ]
        else:
            if multiprocesses is True:
                pool = ProcessPool(nodes=None)
            else:
                pool = ProcessPool(nodes=multiprocesses)
            results = pool.map(self.parse_one, jds)
        if self.save_none_url_tweet is False:
            return [r for r in results if r is not None]
        else:
            return results 
Author: IUNetSci, Project: hoaxy-backend, Lines: 20, Source: parsers.py
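
The pattern above, a flag that switches between a plain list comprehension and ProcessPool.map, also works outside hoaxy-backend. The sketch below is a standalone approximation: parse_one is a stand-in for the real tweet parser, and the JSON strings are dummy inputs.

from pathos.multiprocessing import ProcessPool

def parse_one(jd):
    # stand-in for the real per-tweet parser
    return len(jd)

def parse_many(jds, multiprocesses=False):
    if not multiprocesses:
        return [parse_one(jd) for jd in jds]
    # True lets pathos pick the node count; an int requests that many workers
    nodes = None if multiprocesses is True else multiprocesses
    pool = ProcessPool(nodes=nodes)
    return pool.map(parse_one, jds)

if __name__ == '__main__':
    print(parse_many(['{"id": 1}', '{"id": 22}'], multiprocesses=2))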

Example 3: _parallel

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def _parallel(ordered: bool, function: Callable, *iterables: Iterable, **kwargs: Any) -> Generator:
    """Returns a generator for a parallel map with a progress bar.

    Arguments:
        ordered(bool): True for an ordered map, False for an unordered map.
        function(Callable): The function to apply to each element of the given Iterables.
        iterables(Tuple[Iterable]): One or more Iterables containing the data to be mapped.

    Returns:
        A generator which applies the function to each element of the given Iterables
        in parallel (preserving input order when ordered is True), with a progress bar.
    """

    # Extract num_cpus
    num_cpus = kwargs.pop('num_cpus', None)

    # Determine num_cpus
    if num_cpus is None:
        num_cpus = cpu_count()
    elif isinstance(num_cpus, float):
        num_cpus = int(round(num_cpus * cpu_count()))

    # Determine length of tqdm (equal to length of shortest iterable)
    length = min(len(iterable) for iterable in iterables if isinstance(iterable, Sized))

    # Create parallel generator
    map_type = 'imap' if ordered else 'uimap'
    pool = Pool(num_cpus)
    map_func = getattr(pool, map_type)

    for item in tqdm(map_func(function, *iterables), total=length, **kwargs):
        yield item

    pool.clear() 
Author: swansonk14, Project: p_tqdm, Lines: 36, Source: p_tqdm.py
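
The interesting part of this example is the choice between imap (lazy, results in input order) and uimap (lazy, results in completion order). Below is a minimal sketch of that difference without the tqdm wrapper; the sleep times are chosen so later inputs finish first, and the pool size is picked so every item gets its own worker.

import time
from pathos.multiprocessing import ProcessPool

def slow_square(x):
    time.sleep(0.1 * (5 - x))  # later inputs finish sooner
    return x ** 2

if __name__ == '__main__':
    pool = ProcessPool(nodes=5)
    print(list(pool.imap(slow_square, range(5))))   # input order: [0, 1, 4, 9, 16]
    print(list(pool.uimap(slow_square, range(5))))  # roughly completion order: [16, 9, 4, 1, 0]
    pool.clear()  # drop the cached pool, as the example above does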

Example 4: main

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def main(args):
    if len(args.input) < 2:
        print("Please name at least one STAR file and an output directory")
        return 1

    if args.apix is None:
        print("Using pixel size computed from STAR files")
    
    def do_job(star):
        try:
            mrc = os.path.join(args.output, os.path.basename(star).replace(".star", ".mrc"))
            print("Starting reconstruction of %s" % star)
            do_reconstruct(star, mrc, args.apix, args.sym, args.ctf)
            print("Wrote %s reconstruction to %s" % (star, mrc))
            if args.mask is not None:
                masked_mrc = mrc.replace(".mrc", "_masked.mrc")
                do_mask(mrc, masked_mrc, args.mask)
                print("Wrote masked map %s" % masked_mrc)
            if args.mask is not None and args.delete_unmasked:
                delete_unmasked(mrc, masked_mrc)
                print("Overwrote %s with %s" % (mrc, masked_mrc))
        except Exception as e:
            print("Failed on %s" % star)
        return 0

    pool = Pool(nodes=args.nproc)

    #pool.apipe(do_job, args.input)
    results = pool.imap(do_job, args.input)
    codes = list(results)

    if pool is not None:
        pool.close()
        pool.join()
        pool.terminate()

    return 0 
Author: asarnow, Project: pyem, Lines: 39, Source: reconstruct.py
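
The commented-out pool.apipe line points at pathos' asynchronous submission API. The sketch below shows both styles side by side; do_job here is a trivial placeholder for do_reconstruct/do_mask, and the .star file names are made up.

from pathos.multiprocessing import ProcessPool

def do_job(path):
    # placeholder for reconstructing and masking one STAR file
    return "processed %s" % path

if __name__ == '__main__':
    inputs = ["a.star", "b.star", "c.star"]
    pool = ProcessPool(nodes=2)

    # Lazy, ordered dispatch as in the example above; list() forces evaluation
    codes = list(pool.imap(do_job, inputs))
    print(codes)

    # Asynchronous single submission, as the commented-out apipe call suggests
    future = pool.apipe(do_job, "d.star")
    print(future.get())

    pool.close()
    pool.join()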

Example 5: batch_sim

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def batch_sim(sim_instances, parallel=False):
    tic = time.time()
    if parallel and pathos:
        with Pool() as p:
            results = p.map(sim, sim_instances)
    else:
        if parallel and not pathos:
            print('Simulation is using a single process even though parallel=True.')
        results = [sim(s) for s in sim_instances]
    toc = time.time()
    print('Simulation took {} sec.'.format(toc - tic))
    return results 
Author: jxx123, Project: simglucose, Lines: 14, Source: sim_engine.py
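
This snippet depends on a module-level pathos flag and a Pool name that are bound at import time. The sketch below is one hedged way to produce them so the fallback logic can be run on its own; sim is a dummy stand-in for running one simglucose simulation.

import time

try:
    from pathos.multiprocessing import ProcessPool as Pool
    pathos = True
except ImportError:
    pathos = False

def sim(x):
    time.sleep(0.01)  # stand-in for running one simulation instance
    return x * 2

def batch_sim(sim_instances, parallel=False):
    if parallel and pathos:
        with Pool() as p:
            return p.map(sim, sim_instances)
    if parallel and not pathos:
        print('Simulation is using a single process even though parallel=True.')
    return [sim(s) for s in sim_instances]

if __name__ == '__main__':
    print(batch_sim(list(range(8)), parallel=True))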

Example 6: parallelize_simulations

# Required import: from pathos import multiprocessing [as alias]
# Or: from pathos.multiprocessing import ProcessPool [as alias]
def parallelize_simulations(
        simulation_execs: List[Callable],
        var_dict_list: List[VarDictType],
        states_lists: List[StatesListsType],
        configs_structs: List[ConfigsType],
        env_processes_list: List[EnvProcessesType],
        Ts: List[range],
        SimIDs,
        Ns: List[int]
    ):
    print('Execution Mode: parallelized')
    params = list(
        zip(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, SimIDs, Ns)
    )

    len_configs_structs = len(configs_structs)

    unique_runs = Counter(SimIDs)
    sim_count = max(unique_runs.values())
    highest_divisor = int(len_configs_structs / sim_count)

    new_configs_structs, new_params = [], []
    for count in range(sim_count):
        if count == 0:
            new_params.append(
                params[count: highest_divisor]
            )
            new_configs_structs.append(
                configs_structs[count: highest_divisor]
            )
        elif count > 0:
            new_params.append(
                params[count * highest_divisor: (count + 1) * highest_divisor]
            )
            new_configs_structs.append(
                configs_structs[count * highest_divisor: (count + 1) * highest_divisor]
            )

    def threaded_executor(params):
        tp = TPool(len_configs_structs)
        if len_configs_structs > 1:
            results = tp.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6], t[7]), params)
        else:
            t = params[0]
            results = t[0](t[1], t[2], t[3], t[4], t[5], t[6], t[7])

        tp.close()
        return results

    len_new_configs_structs = len(new_configs_structs)
    pp = PPool(len_new_configs_structs)
    results = flatten(pp.map(lambda params: threaded_executor(params), new_params))
    pp.close()
    return results 
Author: cadCAD-org, Project: cadCAD, Lines: 56, Source: execution.py
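
Stripped of the cadCAD-specific types and the nested thread pool, the core pattern here is: split the parameter list into one chunk per simulation, hand each chunk to ProcessPool.map (pathos can pickle the lambda thanks to dill), and flatten the per-chunk results. Below is a simplified, self-contained sketch; the worker, the SimIDs, and the data are placeholders, and each chunk is processed serially inside its worker instead of with an inner ThreadPool.

from collections import Counter
from pathos.multiprocessing import ProcessPool as PPool

def flatten(list_of_lists):
    return [item for sublist in list_of_lists for item in sublist]

def run_one(params):
    sim_id, step = params
    return (sim_id, step * 10)  # stand-in for executing one configuration

if __name__ == '__main__':
    sim_ids = [0, 0, 1, 1]                        # two simulations, two configs each
    params = list(zip(sim_ids, range(len(sim_ids))))

    sim_count = max(Counter(sim_ids).values())    # configs per simulation
    chunk = len(params) // sim_count
    chunks = [params[i * chunk:(i + 1) * chunk] for i in range(sim_count)]

    pp = PPool(len(chunks))
    results = flatten(pp.map(lambda c: [run_one(p) for p in c], chunks))
    pp.close()
    print(results)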


Note: The pathos.multiprocessing.ProcessPool examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code. Do not reproduce this article without permission.