

Python Pool.imap_unordered Method Code Examples

This article collects typical usage examples of the multiprocessing.Pool.imap_unordered method in Python. If you have been wondering what exactly Pool.imap_unordered does, how to call it, or where it fits, the hand-picked examples below should help. You can also explore the other usage examples for multiprocessing.Pool.


Fifteen code examples of Pool.imap_unordered are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
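
For orientation, here is a minimal, self-contained Python 3 sketch of the method's contract (illustrative only, not taken from any project below): imap_unordered(func, iterable[, chunksize]) applies func to every item in worker processes and returns an iterator that yields each result as soon as it completes, in completion order rather than submission order.

import multiprocessing


def square(x):
    # Runs in a worker process; must be picklable (a top-level function).
    return x * x


if __name__ == '__main__':
    with multiprocessing.Pool(processes=4) as pool:
        # Results arrive in completion order, which may differ from input order.
        for result in pool.imap_unordered(square, range(10)):
            print(result)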

Example 1: update

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
    def update(self, date):
        """Update index components (and weight) for the **same** day before market open."""
        CMD = sql.CMD1.format(date=date)
        self.logger.debug('Executing command:\n{}', CMD)
        self.cursor.execute(CMD)
        df1 = pd.DataFrame(list(self.cursor))
        if len(df1) == 0:
            self.logger.warning('No records found for {} on {}', self.db.index_components.name, date)
            return

        df1.columns = ['dname', 'market', 'sid']
        df1.dname = ['SH'+dname if mkt == 83 else 'SZ'+dname for mkt, dname in zip(df1.market, df1.dname)]
        df1.index = df1.sid

        CMD = sql.CMD2.format(date=date)
        self.logger.debug('Executing command:\n{}', CMD)
        self.cursor.execute(CMD)
        try:
            df2 = pd.DataFrame(list(self.cursor))
            df2.columns = ['dname', 'market', 'sid', 'weight']
            df2.dname = ['SH'+dname if mkt == 83 else 'SZ'+dname for mkt, dname in zip(df2.market, df2.dname)]
            df2.index = df2.sid
        except Exception:  # no weight data available for this date
            df2 = None

        grouped = df1.groupby('dname')
        pool = Pool(self.threads)
        pool.imap_unordered(worker, [(date, dname, _df1, df2) for dname, _df1 in grouped], self.threads)
        pool.close()
        pool.join()

        self.logger.info('UPSERT documents for {} indices into (c: [{}]) of (d: [{}]) on {}', len(grouped), COLLECTION.name, self.db.name, date)
Author: leeong05, Project: orca, Lines: 34, Source: components.py
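
Two caveats in this example: the third positional argument to imap_unordered is chunksize (a batching factor), so passing self.threads there batches tasks rather than limiting concurrency; and the returned iterator is never consumed, so close()/join() still runs every task but any exception raised inside worker is silently discarded. A minimal sketch of a variant that surfaces worker errors, using a hypothetical worker in place of the project's:

from multiprocessing import Pool


def worker(task):
    # Hypothetical stand-in for the project's worker(); raises on bad input.
    date, dname = task
    if dname is None:
        raise ValueError('missing index name for %s' % date)
    return dname


if __name__ == '__main__':
    tasks = [('20140101', 'SH000001'), ('20140101', 'SZ399001')]
    pool = Pool(2)
    try:
        # Iterating the result re-raises any worker exception here.
        for dname in pool.imap_unordered(worker, tasks):
            print(dname)
    finally:
        pool.close()
        pool.join()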

Example 2: __init__

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
 def __init__(self,
              pool: multiprocessing.Pool,
              post_files: typing.Iterable[pathlib.Path],
              base_url: str=None,
              feed_url: str=None,
              outdate_epoch: typing.Optional[datetime.datetime]=None):
     self.logger = logging.getLogger('Blog')
     self.pool = pool
     self.base_url = base_url
     self._feed_url = feed_url
     self.logger.info('Loading posts...')
     self.posts = list(map(Post, post_files))
     # Loading published dates
     list(pool.imap_unordered(self._get_published_at, self.posts))
     # Loading titles
     list(pool.imap_unordered(operator.attrgetter('title'), self.posts))
     self.posts.sort(key=self._get_published_at)
     self.canon_posts = [p for p in self.posts if p.canon]
     self.logger.info('Total %d posts are loaded.', len(self.posts))
     self.current_base_path = './'
     self.outdate_epoch = outdate_epoch
     self.jinja2_env = Environment(loader=FileSystemLoader('templates'),
                                   extensions=['jinja2.ext.with_'],
                                   autoescape=True)
     self.jinja2_env.globals.update(
         blog=self,
         href_for=self.resolve_relative_url,
         outdate_epoch=self.outdate_epoch
     )
Author: dahlia, Project: blog, Lines: 31, Source: gen.py
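
Note that both list(pool.imap_unordered(...)) calls discard their results: each Post is pickled to a worker, the attribute is computed there, and the returned value is dropped. Attribute state mutated on the worker's copy does not propagate back to the parent's Post objects, so this warm-up pays off only if the computation fills a cache shared across processes (for example, on disk). A sketch that keeps the computed values instead, deliberately switching to the order-preserving pool.map so they can be zipped back (Post here is a hypothetical stand-in):

import operator
from multiprocessing import Pool


class Post:
    # Hypothetical minimal stand-in for the project's Post class.
    def __init__(self, path):
        self.path = path

    @property
    def title(self):
        return str(self.path).rsplit('/', 1)[-1]


if __name__ == '__main__':
    posts = [Post('posts/a.md'), Post('posts/b.md')]
    with Pool(2) as pool:
        # pool.map preserves order, so results can be re-attached in the parent.
        titles = pool.map(operator.attrgetter('title'), posts)
    for post, title in zip(posts, titles):
        print(post.path, title)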

Example 3: make_epoch

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def make_epoch(n, train_true, train_false, val_true, val_false):
    n = n[0]
    train_false = list(train_false)
    val_false = list(val_false)
    np.random.shuffle(train_false)
    np.random.shuffle(val_false)

    n_train_true = len(train_true)
    n_val_true = len(val_true)

    train_epoch = train_true + train_false[:n_train_true*2] #*2 to account for 1 flip directions
    val_epoch = val_true + val_false[:n_val_true*2]

    train_epoch = combine_tups(train_epoch)
    val_epoch = combine_tups(val_epoch)

    print "Epoch {0} n files {1}&{2}".format(n, len(train_epoch), len(val_epoch))
    pool = Pool(processes=12)
    train_epoch_data = list(itertools.chain.from_iterable(pool.imap_unordered(load_data, train_epoch)))
    print "Epoch {0} done loading train".format(n)

    val_epoch_data = list(itertools.chain.from_iterable(pool.imap_unordered(load_data, val_epoch)))
    print "Epoch {0} done loading validation".format(n)
    pool.close()

    np.random.shuffle(train_epoch_data)
    return train_epoch_data, val_epoch_data
Author: gzuidhof, Project: luna16, Lines: 29, Source: fr3dnet_trainer.py
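
The itertools.chain.from_iterable(pool.imap_unordered(...)) idiom above is worth noting: when each task returns a list (here, several samples per candidate file), chaining flattens the per-task lists as they complete. A minimal Python 3 sketch of the same idiom with a hypothetical loader:

import itertools
from multiprocessing import Pool


def load_data(path):
    # Hypothetical loader: returns several training samples per input file.
    return [(path, i) for i in range(3)]


if __name__ == '__main__':
    paths = ['a.npy', 'b.npy', 'c.npy']
    with Pool(2) as pool:
        samples = list(itertools.chain.from_iterable(
            pool.imap_unordered(load_data, paths)))
    print(len(samples))  # 9 samples, grouped per file but in completion order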

Example 4: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def main():
    args = docopt(__doc__)
    feature_name = args['<feature_name>']
    assert feature_name == 'words'
    assert args['<experimentset_name>'] in EXPERIMENT_SETS, '<experimentset_name> must be one of %s' % str(EXPERIMENT_SETS.keys())
    c = get_config()
    experiment_set = EXPERIMENT_SETS[args['<experimentset_name>']](feature_name=feature_name)

    print "Computing foreground group sums using %d cores..." % c.num_cores
    pool = Pool(c.num_cores, init_worker)
    fg_groups = experiment_set.list_foreground_groups()
    cache = {}
    try:
        for group_name, sum_vector in progress.bar(pool.imap_unordered(ComputeForegroundGroupSumCallable(experiment_set), fg_groups), label="Progress ", expected_size=len(fg_groups)):
            cache[group_name] = sum_vector
    except KeyboardInterrupt:
        print "Terminating pool.."
        pool.terminate()
        pool.join()

    print "Computing background sums..."
    bg_groups = experiment_set.list_background_groups()
    for g in bg_groups:
        sum_vector = experiment_set.compute_background_group_sum(g, cache)
        cache[g] = sum_vector

    print "Saving sums to ZODB..."
    zodb_root = open_zodb(read_only=False)
    if getattr(zodb_root, 'group_sums', None) is None:
        zodb_root.group_sums = BTrees.OOBTree.OOBTree()
        transaction.commit()
    if feature_name not in zodb_root.group_sums:
        zodb_root.group_sums[feature_name] = BTrees.OOBTree.OOBTree()
        transaction.commit()
    for k, v in cache.iteritems():
        zodb_root.group_sums[feature_name][k] = v
    transaction.commit()


    print "Creating output db tables..."
    create_db(c.resultsdb_url)
    session_out = open_db(c.resultsdb_url)

    print "Computing overrepresentation using %d cores..." % c.num_cores
    exps = experiment_set.list_experiments()
    cls = experiment_set.result_table_class()
    try:
        for fg, bg, results in progress.bar(pool.imap_unordered(ComputeOverrepresentedWordsCallable(experiment_set), exps), label="Progress ", expected_size=len(exps)):
            for w, odds, pval in results:
                c = cls(foreground_group_name=fg, background_group_name=bg, word=w, odds=odds, pval=pval)
                session_out.add(c)
    except KeyboardInterrupt:
        print "Terminating pool.."
        pool.terminate()
        pool.join()

    print "Committing..."
    session_out.commit()
    print "Done"
Author: johnfelipe, Project: talkofacta, Lines: 61, Source: extract_significant_features.py
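
The Pool(c.num_cores, init_worker) call passes a worker initializer, which is what makes the KeyboardInterrupt handling above reliable: a common recipe is to have every worker ignore SIGINT so that Ctrl-C reaches only the parent, which then calls pool.terminate(). The project's init_worker is not shown, so the sketch below assumes that recipe:

import signal
from multiprocessing import Pool


def init_worker():
    # Workers ignore SIGINT; only the parent sees KeyboardInterrupt.
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def work(x):
    return x * x


if __name__ == '__main__':
    pool = Pool(4, init_worker)
    try:
        for _ in pool.imap_unordered(work, range(1000)):
            pass
        pool.close()
    except KeyboardInterrupt:
        pool.terminate()
    finally:
        pool.join()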

Example 5: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def main() :
    print "Title here"
    multiprocessing.freeze_support()
    PROCESSES = 4
    print '\r\n\tCreating pool with:\t%d processes' % PROCESSES
    pool = Pool(PROCESSES)
    print '\tNo. of cpu\'s present:\t%d cores' % cpu_count()
    procList = ["process1", "process2", "process3", "process4", "process5", "process6" ]
    for Name in procList :
        pool.imap_unordered(multi_run_wrapper,[(Name,variable1,variable2,variable3, variable4,variable5,variable6)])
    pool.close();    pool.join()
Author: tarneaud, Project: python-husks, Lines: 13, Source: multiprocessing_template.py
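
As written, this template has two problems: variable1 through variable6 and multi_run_wrapper are left undefined, and calling imap_unordered once per name with a single-item list serializes the work and drops each result iterator. A sketch of the presumably intended pattern, building all argument tuples first and submitting them in one call (all names and values are placeholders):

from multiprocessing import Pool


def multi_run_wrapper(args):
    # Placeholder: unpack the tuple and do the real work.
    name, var1, var2 = args
    return '%s processed %s and %s' % (name, var1, var2)


if __name__ == '__main__':
    names = ['process1', 'process2', 'process3', 'process4']
    jobs = [(name, 'value1', 'value2') for name in names]  # placeholder values
    with Pool(4) as pool:
        for result in pool.imap_unordered(multi_run_wrapper, jobs):
            print(result)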

Example 6: handle

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
    def handle(self, *args, **options):
        user_count = options['count'] + 1
        users = range(1, user_count)
        versions = list(Version.objects.select_related('app', 'platform').filter_by_enabled())

        job_size = int(user_count / ((cpu_count() or 1) * 2)) or 1  # parentheses fixed: 'cpu_count() or 1 * 2' parsed as 'cpu_count() or 2'
        job_data = [users[i:i + job_size] for i in range(0, len(users), job_size)]

        pool = Pool()
        pool.imap_unordered(partial(run_worker, versions=versions), job_data)
        pool.close()
        pool.join()
Author: shashkin, Project: omaha-server, Lines: 14, Source: generate_fake_statistics.py

Example 7: _go

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
 def _go(self, num_procs, chunk_size = None):
     '''
     This is the equivalent of the main method. It will 
     create the processes and the pipeline between item generators -> 
     mappers -> a reducer.
     '''
     pool = None
     try:
         print('Initiating...', file=sys.stderr)
 
         igen = self.item_generator()
         reducer = self.reducer()
         mapper = self.mapper()
         
         if (num_procs > 1):
             print('Using %d processes' %num_procs, file=sys.stderr)
             
             def igen_helper():
                 '''
                 Helper generator to pass the mapper object
                 to each slave process
                 '''
                 for key, item in igen:
                     yield (mapper, key, item)
             
             pool = Pool(num_procs)
             results = None
             if not chunk_size:
                 results = pool.imap_unordered(_processor_helper, 
                                               igen_helper())
             else:
                 results = pool.imap_unordered(_processor_helper, 
                                               igen_helper(), chunk_size)
             
             for key, value in results:
                 reducer(key, value)
 
         else:
             print('Using one mapper only', file = sys.stderr)
                 
             for key, item in igen:
                 value = mapper(key, item)
                 reducer(key, value)
                 
         self.finalize()
         print('Done.', file = sys.stderr)
     finally:
         if pool:
             pool.close()
             pool.join()
Author: flaviovdf, Project: vodlibs, Lines: 52, Source: mapreducescript.py
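
The chunk_size branch above matters for throughput: imap_unordered ships work to the pool in batches of chunksize items (the default is 1), so a larger value amortizes pickling and queue overhead on many small tasks, at the cost of coarser load balancing. A rough way to observe the effect, as a sketch:

import time
from multiprocessing import Pool


def tiny(x):
    return x + 1


if __name__ == '__main__':
    items = range(100000)
    with Pool(4) as pool:
        for chunksize in (1, 64, 1024):
            start = time.time()
            # Consume the iterator; bigger chunks mean fewer IPC round-trips.
            total = sum(pool.imap_unordered(tiny, items, chunksize))
            print(chunksize, total, round(time.time() - start, 2))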

Example 8: all_links

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def all_links(root='http://ailev.livejournal.com/', nb=10, path='post-list.txt'):
    print('Fetch calendar entries')
    days = list_days(root_url=root)
    print('There are {} days with entries'.format(len(days)))
    t0 = time()

    pool = Pool(processes=nb)
    it = pool.imap_unordered(list_posts, days)
    work = list(tqdm(it, total=len(days)))
    pool.close()
    pool.join()

    links = []
    for x in work:
        if x:
            links.extend(x)
    # there may be duplicates (unclear why), so a quick workaround
    links = list(set(links))
    links.sort()

    with open(path, 'w') as fout:
        fout.writelines(x + '\n' for x in links)

    t1 = time()
    print('Done for {}s'.format(t1 - t0))
    return links
Author: m12sl, Project: levenchuck-post, Lines: 28, Source: download.py

Example 9: parallel_iter

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def parallel_iter(processes, f, inputs):
    """
    Return a parallel iterator.

    INPUT:

    - ``processes`` -- integer
    - ``f`` -- function
    - ``inputs`` -- an iterable of pairs (args, kwds)

    OUTPUT:

    - iterator over values of ``f`` at ``args,kwds`` in some random order.

    EXAMPLES::

        sage: def f(x): return x+x
        sage: import sage.parallel.multiprocessing_sage
        sage: v = list(sage.parallel.multiprocessing_sage.parallel_iter(2, f, [((2,), {}), ((3,),{})]))
        sage: v.sort(); v
        [(((2,), {}), 4), (((3,), {}), 6)]
    """
    from twisted.internet import reactor   # do not delete this (!)  -- see trac 8785

    if processes == 0: processes = ncpus.ncpus()
    p = Pool(processes)
    fp = pickle_function(f)

    result = p.imap_unordered(call_pickled_function, [ (fp, t) for t in inputs ])
    for res in result:
        yield res
    p.close()
    p.join()
Author: ProgVal, Project: sage, Lines: 35, Source: multiprocessing_sage.py
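
Sage serializes f explicitly with pickle_function because the standard pickler stores ordinary functions by reference (module plus qualified name), which fails for lambdas and interactively defined functions. A small sketch showing the failure mode that motivates this:

from multiprocessing import Pool

if __name__ == '__main__':
    with Pool(2) as pool:
        try:
            # Lambdas cannot be pickled by reference, so this raises.
            list(pool.imap_unordered(lambda x: x + x, [1, 2, 3]))
        except Exception as exc:
            print(type(exc).__name__, exc)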

Example 10: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def main():
    parser = argparse.ArgumentParser(description='Analyze a bandersnatch mirror.')
    parser.add_argument('--json',
                       help='save raw data to a json file',
                       default=None)
    args = parser.parse_args()
    concurrency = 8
    root = "/var/spool/pypi/web/packages/source/"
    p = Pool()
    results = {}
    try:
        try:
            for path, result in \
                p.imap_unordered(analyse_sdist, yield_packages(root)):
                results[path] = result
            p.close()
        except:
            p.terminate()
            raise
    finally:
        p.join()
    if args.json:
        with open(args.json, 'wb') as f:
            f.write(json.dumps(results))
    pprint.pprint(results)
Author: pombredanne, Project: banderscan, Lines: 27, Source: scan.py

Example 11: run_committee

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def run_committee(graph, eweights, signs, tree_kind='rst', train_vertices=.1,
                  size=13, degree_function=None, threshold_function=None):
    global GRAPH, EWEIGHTS, SIGNS, VTRAIN
    GRAPH, EWEIGHTS, SIGNS = graph, eweights, signs
    if isinstance(train_vertices, float):
        num_revealed = int(train_vertices*len(graph))
        train_vertices = random.sample(list(graph.keys()), num_revealed)
    VTRAIN = train_vertices
    tree_kind = tree_kind.lower()
    assert tree_kind in ['rst', 'bfs', 'stg'], tree_kind
    if tree_kind == 'rst':
        args = size*[(get_rst, {'fake': None}), ]
    if tree_kind == 'bfs':
        degrees = sorted(((node, len(adj)) for node, adj in graph.items()),
                         key=lambda x: x[1])
        args = [(get_bfs, {'root': _[0]}) for _ in degrees[-size:]]
    if tree_kind == 'stg':
        func_dict = {'degree_function': degree_function,
                     'threshold_function': threshold_function}
        args = size*[(get_stg, func_dict), ]
    num_threads = min(6, size)
    pool = Pool(num_threads)
    res = list(pool.imap_unordered(predict, args,
                                   chunksize=size//num_threads))
    preds, gold = [_[1] for _ in res], res[0][0]
    return gold, majority_vote(preds)
Author: daureg, Project: magnet, Lines: 28, Source: shazoo.py
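
Publishing the inputs through module-level globals (GRAPH, EWEIGHTS, SIGNS, VTRAIN) before the Pool is created is a deliberate choice: with the fork start method, workers inherit the parent's memory, so large read-only structures never have to be pickled per task. The same idea works portably (including with the spawn start method) via an initializer; a sketch under that assumption:

from multiprocessing import Pool

GRAPH = None  # set in each worker by the initializer


def init_globals(graph):
    global GRAPH
    GRAPH = graph


def degree(node):
    # Reads the worker-local copy of the shared, read-only structure.
    return node, len(GRAPH[node])


if __name__ == '__main__':
    graph = {'a': ['b', 'c'], 'b': ['a'], 'c': ['a']}
    with Pool(2, initializer=init_globals, initargs=(graph,)) as pool:
        for node, deg in pool.imap_unordered(degree, graph):
            print(node, deg)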

Example 12: initJobsDirs

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def initJobsDirs(jobs_dir, include_wt, displacement, debug):
    """ initializes the jobDirs variable """
    pdbToProtein = { pdbPath.split('/')[-1]: protein
                     for protein, pdbPath in pdbs.iteritems() }

    roots = list()
    filelists = list()
    for root, dirs, files in os.walk(jobs_dir, followlinks=True):
        roots.append(root)
        filelists.append(files)
        # checkJobsDir(pdbToProtein, root, files, include_wt, displacement, debug)

    doers = Pool(cpu_count())
    jobs = zip([pdbToProtein]*len(roots),
               roots,
               filelists,
               [include_wt]*len(roots),
               [displacement]*len(roots),
               [debug]*len(roots))

    print("Loading {} evaluators...{}".format(len(roots),
                                              datetime.datetime.now()))
    sys.stdout.flush()
    with click.progressbar(doers.imap_unordered(checkJobsDir, jobs),
                           length=len(roots), label='Running',
                           file=sys.stderr) as progbar:
        for j in progbar:
            pass
    print("Done...{}".format(datetime.datetime.now()))
Author: immunityproject, Project: pySE, Lines: 31, Source: compute_shannon_entropy.py
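
Because imap_unordered passes exactly one argument to the callable, this example replicates the constant parameters into every argument tuple with zip. An equivalent, tidier pattern is to bind the constants with functools.partial (as Example 6 above does) and iterate only over what varies; a sketch with hypothetical names:

from functools import partial
from multiprocessing import Pool


def check_jobs_dir(job, include_wt=False, debug=False):
    # Hypothetical worker: 'job' varies per task; the rest stay constant.
    root, files = job
    return root, len(files), include_wt, debug


if __name__ == '__main__':
    jobs = [('dir_a', ['x.pdb', 'y.pdb']), ('dir_b', ['z.pdb'])]
    worker = partial(check_jobs_dir, include_wt=True, debug=False)
    with Pool(2) as pool:
        for result in pool.imap_unordered(worker, jobs):
            print(result)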

Example 13: get_kmer_counts

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def get_kmer_counts(input, output, k, ns, nprocs, verbose):
    """Analyse kmers. Multiprocessing enabled"""
    #define base2digit dict for 4-char seq
    base2digit = {"A": "0", "C": "1", "G": "2", "T": "3"}    
    if ns:
        #change to 5-char seq if Ns in seq
        base2digit = {"A": "0", "C": "1", "G": "2", "N": "3", "T": "4"}
    #init mer counts
    #255 for uint8 #65,535 for uint16 or #4,294,967,295 for uint32 
    merCounts = np.zeros(len(base2digit)**k/2, dtype='uint16')
    #start pool #maxtasksperchild=1000)
    p = Pool(nprocs, initializer=init_args, initargs=(k, ns, base2digit)) 
    #process reads
    for i, ids in enumerate(p.imap_unordered(seq2mers, SeqIO.parse(input, 'fastq'), \
                                             chunksize=100), 1):
        if not i%1e4:
            sys.stderr.write(" %s [%s Mb]\r"%(i, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
        for mid in ids:
            merCounts[mid] += 1
    sys.stderr.write(" %s [%s Mb]\n"%(i, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
    #get mer freq
    maxCount    = merCounts.max()
    if maxCount < 100:
        maxCount = 100
    occurrences = [0]*maxCount
    for c in merCounts:
        occurrences[c-1] += 1
    #write to file
    output.write("\n".join("%s\t%s"%xy for xy in enumerate(occurrences,1))+"\n")
    return occurrences
Author: jpmtavares, Project: bin, Lines: 32, Source: fastq2kmers.py

Example 14: subconfigure

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def subconfigure(args):
    parser = argparse.ArgumentParser()
    parser.add_argument('--list', type=str,
        help='File containing a list of subconfigures to run')
    parser.add_argument('--skip', type=str,
        help='File containing a list of Subconfigures to skip')
    parser.add_argument('subconfigures', type=str, nargs='*',
        help='Subconfigures to run if no list file is given')
    args, others = parser.parse_known_args(args)
    subconfigures = args.subconfigures
    if args.list:
        subconfigures.extend(open(args.list, 'rb').read().splitlines())
    if args.skip:
        skips = set(open(args.skip, 'rb').read().splitlines())
        subconfigures = [s for s in subconfigures if s not in skips]

    if not subconfigures:
        return 0

    ret = 0
    # One would think using a ThreadPool would be faster, considering
    # everything happens in subprocesses anyways, but no, it's actually
    # slower on Windows. (20s difference overall!)
    pool = Pool(min(len(subconfigures), cpu_count()))
    for relobjdir, returncode, output in \
            pool.imap_unordered(run, subconfigures):
        print prefix_lines(output, relobjdir)
        sys.stdout.flush()
        ret = max(returncode, ret)
        if ret:
            break
    pool.close()
    pool.join()
    return ret
Author: Andrel322, Project: gecko-dev, Lines: 36, Source: subconfigure.py
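
One subtlety in the early exit above: by the time the loop breaks, the pool's feeder thread has typically queued every remaining subconfigure, and close()/join() still waits for all of them to finish; breaking only stops the result processing. If the goal is to abort outstanding work on the first failure, pool.terminate() does that, at the cost of discarding in-flight output. A hedged sketch:

from multiprocessing import Pool


def run(task):
    # Hypothetical job: task 7 reports a failing exit code.
    return task, (1 if task == 7 else 0)


if __name__ == '__main__':
    pool = Pool(4)
    ret = 0
    try:
        for task, code in pool.imap_unordered(run, range(100)):
            ret = max(ret, code)
            if ret:
                pool.terminate()  # abort queued and in-flight tasks now
                break
        else:
            pool.close()
    finally:
        pool.join()
    print('exit status:', ret)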

Example 15: main

# Required import: from multiprocessing import Pool [as alias]
# Or: from multiprocessing.Pool import imap_unordered [as alias]
def main(argv):
    """Go Main Go."""
    scenario = int(argv[1])
    lengths = load_lengths(scenario)
    dates = determine_dates(sys.argv)
    huc12s = find_huc12s(scenario)
    precip = load_precip(dates)
    jobs = []
    for huc12 in huc12s:
        jobs.append([scenario, huc12, lengths[huc12], dates, precip[huc12]])

    # Begin the processing work now!
    # NB: Usage of a ThreadPool here ended in tears (so slow)
    pool = Pool()
    totalinserts = 0
    totalskipped = 0
    totaldeleted = 0
    for huc12, inserts, skipped, deleted in tqdm(
            pool.imap_unordered(do_huc12, jobs), total=len(jobs),
            disable=(not sys.stdout.isatty())):
        if inserts is None:
            print("ERROR: huc12 %s returned 0 data" % (huc12,))
            continue
        totalinserts += inserts
        totalskipped += skipped
        totaldeleted += deleted
    print("env2database.py inserts: %s skips: %s deleted: %s" % (totalinserts,
                                                                 totalskipped,
                                                                 totaldeleted))
    update_metadata(scenario, dates)
Author: akrherz, Project: idep, Lines: 32, Source: env2database.py


Note: The multiprocessing.Pool.imap_unordered examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not repost without permission.