

Python multiprocessing.ProcessPool method: code examples

This article collects typical usage examples of the pathos.multiprocessing.ProcessPool method in Python. If you are wondering what ProcessPool does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the wider pathos.multiprocessing module for related usage examples.


The following presents 6 code examples of multiprocessing.ProcessPool, sorted by popularity by default.
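
Before the project-specific examples, here is a minimal, self-contained sketch of the basic ProcessPool pattern they all build on; the square function and the node count are illustrative only and are not taken from any of the projects below.

from pathos.multiprocessing import ProcessPool

def square(x):
    return x ** 2

if __name__ == "__main__":
    pool = ProcessPool(nodes=4)             # spawn 4 worker processes
    results = pool.map(square, range(10))   # blocking, ordered parallel map
    print(results)                          # [0, 1, 4, 9, ..., 81]
    pool.close()
    pool.join()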

Example 1: __init__

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def __init__(self, n_cpu=-1):
        """Process pool for applying functions in parallel (one worker process per CPU) with progress bars.

        Arguments:
        n_cpu -- The number of processes to spawn. Defaults to the number of threads (logical cores) on your system.

        Usage:
        >>> pool = mlc.SuperPool()  # By default, the cpu count is used
        >>> def f(x):
        ...     return x ** 2
        >>> res = pool.map(f, range(1000))  # Apply function f to every value in y
        [mlcrate] 8 CPUs: 100%|████████████████████████| 1000/1000 [00:00<00:00, 1183.78it/s]
        """
        from multiprocessing import cpu_count
        from pathos.multiprocessing import ProcessPool
        import tqdm

        self.tqdm = tqdm

        if n_cpu == -1:
            n_cpu = cpu_count()

        self.n_cpu = n_cpu
        self.pool = ProcessPool(n_cpu) 
Author: mxbi | Project: mlcrate | Lines of code: 26 | Source file: __init__.py
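
Only __init__ is shown above; mlcrate's actual map implementation is not included here. As a hedged sketch, the progress bar described in the docstring can be reproduced by feeding pathos' lazy imap into tqdm; the helper name map_with_progress is illustrative and is not mlcrate's API.

from pathos.multiprocessing import ProcessPool
from tqdm import tqdm

def map_with_progress(func, values, n_cpu=4):
    pool = ProcessPool(n_cpu)
    # imap yields results lazily, so tqdm can advance as each item completes
    results = list(tqdm(pool.imap(func, values), total=len(values)))
    pool.close()
    pool.join()
    return results

if __name__ == "__main__":
    # pathos serializes with dill, so even a lambda can be mapped
    print(map_with_progress(lambda x: x ** 2, range(100)))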

Example 2: parse_many

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def parse_many(self, jds, validate_tweet=True, multiprocesses=False):
        """Parse many tweets, either sequentially or in parallel.
        """
        if multiprocesses is None or multiprocesses is False\
                or multiprocesses == 0:
            results = [
                self.parse_one(jd, validate_tweet=validate_tweet) for jd in jds
            ]
        else:
            if multiprocesses is True:
                pool = ProcessPool(nodes=None)
            else:
                pool = ProcessPool(nodes=multiprocesses)
            results = pool.map(self.parse_one, jds)
        if self.save_none_url_tweet is False:
            return [r for r in results if r is not None]
        else:
            return results 
Author: IUNetSci | Project: hoaxy-backend | Lines of code: 20 | Source file: parsers.py
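
The multiprocesses argument above switches between a serial list comprehension, a pool sized by pathos itself (True maps to nodes=None), and a pool with an explicit node count. Below is a self-contained sketch of that dispatch pattern; parse and parse_many_demo are illustrative stand-ins and are not part of hoaxy-backend.

from pathos.multiprocessing import ProcessPool

def parse(jd):
    # stand-in for Parser.parse_one
    return jd.get("id")

def parse_many_demo(jds, multiprocesses=False):
    if not multiprocesses:                      # None, False or 0: serial path
        return [parse(jd) for jd in jds]
    nodes = None if multiprocesses is True else multiprocesses
    pool = ProcessPool(nodes=nodes)             # nodes=None lets pathos pick the CPU count
    return pool.map(parse, jds)

if __name__ == "__main__":
    data = [{"id": i} for i in range(8)]
    print(parse_many_demo(data, multiprocesses=2))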

Example 3: _parallel

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def _parallel(ordered: bool, function: Callable, *iterables: Iterable, **kwargs: Any) -> Generator:
    """Returns a generator for a parallel map with a progress bar.

    Arguments:
        ordered(bool): True for an ordered map, False for an unordered map.
        function(Callable): The function to apply to each element of the given Iterables.
        iterables(Tuple[Iterable]): One or more Iterables containing the data to be mapped.

    Returns:
        A generator which will apply the function to each element of the given Iterables
        in parallel in order with a progress bar.
    """

    # Extract num_cpus
    num_cpus = kwargs.pop('num_cpus', None)

    # Determine num_cpus
    if num_cpus is None:
        num_cpus = cpu_count()
    elif type(num_cpus) == float:
        num_cpus = int(round(num_cpus * cpu_count()))

    # Determine length of tqdm (equal to length of shortest iterable)
    length = min(len(iterable) for iterable in iterables if isinstance(iterable, Sized))

    # Create parallel generator
    map_type = 'imap' if ordered else 'uimap'
    pool = Pool(num_cpus)
    map_func = getattr(pool, map_type)

    for item in tqdm(map_func(function, *iterables), total=length, **kwargs):
        yield item

    pool.clear() 
Author: swansonk14 | Project: p_tqdm | Lines of code: 36 | Source file: p_tqdm.py
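
p_tqdm's public helpers (for example p_map) are thin wrappers around this _parallel generator: an ordered call collects the imap results under a progress bar, and num_cpus may be an absolute count or a fraction of the available cores. A short usage sketch, assuming p_tqdm is installed:

from p_tqdm import p_map

def slow_square(x):
    return x ** 2

if __name__ == "__main__":
    # num_cpus=0.5 is converted to int(round(0.5 * cpu_count())), as in _parallel above
    results = p_map(slow_square, range(100), num_cpus=0.5)
    print(results[:5])   # [0, 1, 4, 9, 16]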

Example 4: main

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def main(args):
    if len(args.input) < 2:
        print("Please name at least one STAR file and an output directory")
        return 1

    if args.apix is None:
        print("Using pixel size computed from STAR files")
    
    def do_job(star):
        try:
            mrc = os.path.join(args.output, os.path.basename(star).replace(".star", ".mrc"))
            print("Starting reconstruction of %s" % star)
            do_reconstruct(star, mrc, args.apix, args.sym, args.ctf)
            print("Wrote %s reconstruction to %s" % (star, mrc))
            if args.mask is not None:
                masked_mrc = mrc.replace(".mrc", "_masked.mrc")
                do_mask(mrc, masked_mrc, args.mask)
                print("Wrote masked map %s" % masked_mrc)
            if args.mask is not None and args.delete_unmasked:
                delete_unmasked(mrc, masked_mrc)
                print("Overwrote %s with %s" % (mrc, masked_mrc))
        except Exception as e:
            print("Failed on %s: %s" % (star, e))
        return 0

    pool = Pool(nodes=args.nproc)

    #pool.apipe(do_job, args.input)
    results = pool.imap(do_job, args.input)
    codes = list(results)

    if pool is not None:
        pool.close()
        pool.join()
        pool.terminate()

    return 0 
Author: asarnow | Project: pyem | Lines of code: 39 | Source file: reconstruct.py
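
The pattern above dispatches independent jobs lazily with imap, forces evaluation by listing the results, and then shuts the pool down with close/join/terminate. A minimal, self-contained sketch of the same pattern, with a trivial stand-in for the reconstruction job:

from pathos.multiprocessing import ProcessPool as Pool

def do_job(name):
    # stand-in for do_reconstruct / do_mask
    print("processing %s" % name)
    return 0

if __name__ == "__main__":
    pool = Pool(nodes=2)
    codes = list(pool.imap(do_job, ["a.star", "b.star", "c.star"]))
    pool.close()
    pool.join()
    pool.terminate()
    print(codes)   # [0, 0, 0]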

Example 5: batch_sim

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def batch_sim(sim_instances, parallel=False):
    tic = time.time()
    if parallel and pathos:
        with Pool() as p:
            results = p.map(sim, sim_instances)
    else:
        if parallel and not pathos:
            print('Simulation is using single process even though parallel=True.')
        results = [sim(s) for s in sim_instances]
    toc = time.time()
    print('Simulation took {} sec.'.format(toc - tic))
    return results 
Author: jxx123 | Project: simglucose | Lines of code: 14 | Source file: sim_engine.py
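
batch_sim only parallelizes when parallel=True and pathos imported successfully; otherwise it falls back to a plain list comprehension. A self-contained sketch of that optional-parallelism pattern, with a sleep standing in for simglucose's real simulation call:

import time

try:
    from pathos.multiprocessing import ProcessPool as Pool
    pathos = True
except ImportError:
    pathos = False

def sim(x):
    time.sleep(0.1)        # stand-in for a real simulation run
    return x * 2

def batch_sim_demo(instances, parallel=False):
    if parallel and pathos:
        with Pool() as p:          # context manager closes the pool on exit
            return p.map(sim, instances)
    if parallel and not pathos:
        print('Simulation is using single process even though parallel=True.')
    return [sim(s) for s in instances]

if __name__ == "__main__":
    print(batch_sim_demo(range(4), parallel=True))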

Example 6: parallelize_simulations

# Required import: from pathos import multiprocessing [as alias]
# Or alternatively: from pathos.multiprocessing import ProcessPool [as alias]
def parallelize_simulations(
        simulation_execs: List[Callable],
        var_dict_list: List[VarDictType],
        states_lists: List[StatesListsType],
        configs_structs: List[ConfigsType],
        env_processes_list: List[EnvProcessesType],
        Ts: List[range],
        SimIDs,
        Ns: List[int]
    ):
    print('Execution Mode: parallelized')
    params = list(
        zip(simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, SimIDs, Ns)
    )

    len_configs_structs = len(configs_structs)

    unique_runs = Counter(SimIDs)
    sim_count = max(unique_runs.values())
    highest_divisor = int(len_configs_structs / sim_count)

    new_configs_structs, new_params = [], []
    for count in range(sim_count):
        if count == 0:
            new_params.append(
                params[count: highest_divisor]
            )
            new_configs_structs.append(
                configs_structs[count: highest_divisor]
            )
        elif count > 0:
            new_params.append(
                params[count * highest_divisor: (count + 1) * highest_divisor]
            )
            new_configs_structs.append(
                configs_structs[count * highest_divisor: (count + 1) * highest_divisor]
            )

    def threaded_executor(params):
        tp = TPool(len_configs_structs)
        if len_configs_structs > 1:
            results = tp.map(lambda t: t[0](t[1], t[2], t[3], t[4], t[5], t[6], t[7]), params)
        else:
            t = params[0]
            results = t[0](t[1], t[2], t[3], t[4], t[5], t[6], t[7])

        tp.close()
        return results

    len_new_configs_structs = len(new_configs_structs)
    pp = PPool(len_new_configs_structs)
    results = flatten(pp.map(lambda params: threaded_executor(params), new_params))
    pp.close()
    return results 
Author: cadCAD-org | Project: cadCAD | Lines of code: 56 | Source file: execution.py
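
This example nests two pathos pools: an outer process pool (PPool) hands each chunk of simulation parameters to a worker, and inside each worker a thread pool (TPool) fans the chunk out. A stripped-down, hedged sketch of that nesting, with an illustrative task and hard-coded chunks in place of cadCAD's real simulation arguments:

from pathos.pools import ProcessPool, ThreadPool

def task(x):
    # stand-in for a single simulation execution
    return x * x

def threaded_executor(chunk):
    # each process worker runs its own thread pool over its chunk
    tp = ThreadPool(len(chunk))
    results = tp.map(task, chunk)
    tp.close()
    return results

if __name__ == "__main__":
    chunks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    pp = ProcessPool(len(chunks))
    nested = pp.map(threaded_executor, chunks)
    flat = [r for chunk_results in nested for r in chunk_results]
    pp.close()
    print(flat)   # [0, 1, 4, 9, 16, 25, 36, 49, 64]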


Note: The pathos.multiprocessing.ProcessPool examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.