

Python Pool.map_async Method Code Examples

This article collects typical usage examples of the Python multiprocessing.pool.Pool.map_async method. If you are wondering how to use Pool.map_async in practice, the curated examples below should help; you can also explore the other usage examples for multiprocessing.pool.Pool.


The following shows 6 code examples of Pool.map_async, sorted by popularity by default.
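Before diving into the examples, here is a minimal, self-contained sketch of the basic map_async pattern (the worker function and values below are illustrative, not taken from the examples):

from multiprocessing.pool import Pool

def square(x):
    # Illustrative worker: map_async calls this once per item, in a worker process
    return x * x

if __name__ == "__main__":
    pool = Pool(processes=4)
    async_result = pool.map_async(square, range(10))  # returns an AsyncResult immediately
    pool.close()   # no more tasks will be submitted
    pool.join()    # wait for the workers to finish
    print(async_result.get())  # [0, 1, 4, 9, ...]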

Example 1: run

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]
    def run(self):
        cases = self.get_test_case()
        # Create a process pool with one worker per test case
        pool = Pool(processes=len(cases))

        # result, q and comm are module-level objects defined elsewhere in DRIVER.py
        result.append(pool.map_async(self.init_driver, cases.values()))

        pool.close()
        pool.join()

        # Drain the queue that the workers filled
        while not q.empty():
            comm.Template.set_middle(q.get())
Author: ChristianXu, Project: BTCCQA_Smoke_Test, Lines: 14, Source: DRIVER.py

Example 2: image_urls

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]
    def image_urls(self):
        """ Iterates over json obj, gets image links
            Creates pool of workers, creates new workers """
        json_obj = self.jsonify()

        for post in json_obj['posts']:
            if 'ext' in post:
                self.total_count.value += 1

        self.thread_name = json_obj['posts'][0]['semantic_url']

        for post in json_obj['posts']:
            if 'ext' in post:
                filename = str(post['tim']) + post['ext']
                image_url = 'https://i.4cdn.org/{board}/{file}'.format(
                    board=self.board, file=filename)
                self.filename.append(filename)
                self.downloads.append(image_url)
                self.download_image(image_url, filename)

                with self.counter.get_lock():
                    self.counter.value += 1
                    update_progress(self.counter.value, self.total_count.value)

        manager = Manager()
        pool_data = manager.list(self.downloads)  # share the URL list across worker processes
        partial_data = partial(self.download_image, pool_data)  # bind the shared list as the first argument
        pool = Pool(self.workers)
        pool_map = pool.map_async(partial_data, self.filename)

        try:
            pool.close()
            pool.join()
        except KeyboardInterrupt:
            print("Aborting")
            pool.terminate()
            pool.join()
Author: Boltovnya, Project: 8changrab, Lines: 39, Source: infinity.py

Example 3: raster2pyramid

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]

#......... part of the code is omitted here .........
        os.path.dirname(os.path.realpath(__file__)),
        "tilify.py"
    )

    with rasterio.open(input_file, "r") as input_raster:
        output_bands = input_raster.count
        input_dtype = input_raster.dtypes[0]
        output_dtype = input_raster.dtypes[0]
        nodataval = input_raster.nodatavals[0]
        if not nodataval:
            nodataval = 0
        if output_format == "PNG":
            if output_bands > 3:
                output_bands = 3
                output_dtype = 'uint8'
        scales_minmax = ()
        if scale_method == "dtype_scale":
            for index in range(1, output_bands+1):
                scales_minmax += (DTYPE_RANGES[input_dtype], )
        elif scale_method == "minmax_scale":
            for index in range(1, output_bands+1):
                band = input_raster.read(index)
                scales_minmax += ((band.min(), band.max()), )
        elif scale_method == "crop":
            for index in range(1, output_bands+1):
                scales_minmax += ((0, 255), )
        if input_dtype == "uint8":
            scale_method = None
            scales_minmax = ()
            for index in range(1, output_bands+1):
                scales_minmax += ((None, None), )

    # Create configuration
    config = {}
    config.update(
        process_file=process_file,
        output={
            "path": output_dir,
            "format": output_format,
            "type": pyramid_type,
            "bands": output_bands,
            "dtype": output_dtype
            },
        scale_method=scale_method,
        scales_minmax=scales_minmax,
        input_files={"raster": input_file},
        config_dir=os.getcwd(),
        process_minzoom=minzoom,
        process_maxzoom=maxzoom,
        nodataval=nodataval,
        resampling=resampling,
        bounds=bounds,
        pixelbuffer=5,
        baselevel={"zoom": maxzoom, "resampling": resampling}
    )

    LOGGER.info("preparing process ...")

    try:
        mapchete = Mapchete(
            MapcheteConfig(
                config,
                zoom=zoom,
                bounds=bounds
            )
        )
    except PyCompileError as error:
        print(error)
        return
    except:
        raise

    # Prepare output directory and logging
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logging.config.dictConfig(get_log_config(mapchete))

    for zoom in reversed(range(minzoom, maxzoom+1)):
        # Determine work tiles and run
        work_tiles = mapchete.get_work_tiles(zoom)
        func = partial(_worker,
            mapchete=mapchete,
            overwrite=overwrite
        )
        pool = Pool()
        try:
            pool.map_async(func, work_tiles)
            pool.close()
        except KeyboardInterrupt:
            LOGGER.info(
                "Caught KeyboardInterrupt, terminating workers"
                )
            pool.terminate()
            break
        except:
            raise
        finally:
            pool.close()
            pool.join()
Author: ungarj, Project: mapchete, Lines: 104, Source: pyramid.py
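A pattern shared by Examples 2 and 3: map_async passes exactly one argument to the worker function, so any fixed parameters (the mapchete object, the overwrite flag, the shared URL list) are bound in advance with functools.partial. A minimal sketch of the same idea, with an illustrative worker standing in for _worker:

from functools import partial
from multiprocessing.pool import Pool

def process_tile(tile, overwrite=False):
    # Illustrative stand-in for the _worker function used above
    return (tile, overwrite)

if __name__ == "__main__":
    func = partial(process_tile, overwrite=True)  # bind the fixed keyword argument
    pool = Pool()
    result = pool.map_async(func, range(8))  # workers receive only the tile
    pool.close()
    pool.join()
    print(result.get())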

Example 4: map_async

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]
    def map_async(self, func, iterable, chunksize=None, callback=None):
        # Wrap func so that exceptions raised in worker processes are logged
        return Pool.map_async(self, LogExceptions(func), iterable, chunksize, callback)
Author: rbharath, Project: deepchem, Lines: 4, Source: featurize.py
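LogExceptions is defined elsewhere in featurize.py and is not shown here; the sketch below is an assumed reconstruction of what such a wrapper typically looks like, not the project's actual code. The point of the pattern is that exceptions raised inside pool workers are otherwise easy to lose:

import traceback

class LogExceptions(object):
    # Hypothetical reconstruction: wrap a callable so that an exception raised in a
    # worker process is printed with a full traceback before being re-raised
    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except Exception:
            traceback.print_exc()
            raise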

Example 5: compress_cso

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]
def compress_cso(fname_in, fname_out, level):
    fin, fout = open_input_output(fname_in, fname_out)
    fin.seek(0, os.SEEK_END)
    total_bytes = fin.tell()
    fin.seek(0)

    header_size, block_size, ver, align = 0x18, 0x800, 1, DEFAULT_ALIGN
    magic = ZISO_MAGIC if USE_LZ4 else CISO_MAGIC

    # Alignment is required for any CSO file larger than 2 GB, because the MSB of each
    # index entry is reserved as the plain-block flag; without alignment an offset can
    # exceed 2**31 and clobber that flag.
    if total_bytes >= 2 ** 31 and align == 0:
        align = 1

    header = generate_cso_header(magic, header_size, total_bytes, block_size, ver, align)
    fout.write(header)

    total_block = total_bytes // block_size
    index_buf = [0 for i in range(total_block + 1)]

    fout.write(b"\x00\x00\x00\x00" * len(index_buf))
    show_comp_info(fname_in, fname_out, total_bytes, block_size, align, level)

    write_pos = fout.tell()
    percent_period = total_block // 100
    percent_cnt = 0

    if MP:
        pool = Pool()
    else:
        pool = None

    block = 0
    while block < total_block:
        if MP:
            percent_cnt += min(total_block - block, MP_NR)
        else:
            percent_cnt += 1

        if percent_cnt >= percent_period and percent_period != 0:
            percent_cnt = 0

            if block == 0:
                print("compress %3d%% average rate %3d%%\r" % (block // percent_period, 0), file=sys.stderr, end="")
            else:
                print(
                    "compress %3d%% average rate %3d%%\r"
                    % (block // percent_period, 100 * write_pos // (block * 0x800)),
                    file=sys.stderr,
                    end="",
                )

        if MP:
            iso_data = [(fin.read(block_size), level) for i in range(min(total_block - block, MP_NR))]
            cso_data_all = pool.map_async(zip_compress_mp, iso_data).get(9999999)

            for i in range(len(cso_data_all)):
                write_pos = set_align(fout, write_pos, align)
                index_buf[block] = write_pos >> align
                cso_data = cso_data_all[i]

                if 100 * len(cso_data) // len(iso_data[i][0]) >= min(COMPRESS_THREHOLD, 100):
                    cso_data = iso_data[i][0]
                    index_buf[block] |= 0x80000000  # Mark as plain
                elif index_buf[block] & 0x80000000:
                    print(
                        "Align error, you have to increase align by 1 or CFW won't be able to read offset above 2 ** 31 bytes"
                    )
                    sys.exit(1)

                fout.write(cso_data)
                write_pos += len(cso_data)
                block += 1
        else:
            iso_data = fin.read(block_size)

            try:
                cso_data = zip_compress(iso_data, level)
            except zlib.error as e:
                print("%d block: %s" % (block, e))
                sys.exit(-1)

            write_pos = set_align(fout, write_pos, align)
            index_buf[block] = write_pos >> align

            if 100 * len(cso_data) // len(iso_data) >= COMPRESS_THREHOLD:
                cso_data = iso_data
                index_buf[block] |= 0x80000000  # Mark as plain
            elif index_buf[block] & 0x80000000:
                print(
                    "Align error, you have to increase align by 1 or CFW won't be able to read offset above 2 ** 31 bytes"
                )
                sys.exit(1)

            fout.write(cso_data)
            write_pos += len(cso_data)
            block += 1

    # Last position (total size)
    index_buf[block] = write_pos >> align
#......... part of the code is omitted here .........
Author: raehik, Project: procfw, Lines: 103, Source: ciso.py
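One detail worth noting in this example: pool.map_async(...).get(9999999) is used instead of a plain pool.map(...). Passing a large timeout to AsyncResult.get() is a common workaround that keeps the main process responsive to Ctrl-C while waiting for workers, since a fully blocking wait can swallow KeyboardInterrupt (notably on Python 2). A minimal sketch of the idiom, with an illustrative worker:

from multiprocessing.pool import Pool

def work(x):
    # Illustrative worker
    return x * 2

if __name__ == "__main__":
    pool = Pool()
    try:
        # get() with a timeout keeps the wait interruptible with Ctrl-C
        results = pool.map_async(work, range(100)).get(9999999)
    except KeyboardInterrupt:
        pool.terminate()
        raise
    else:
        pool.close()
        print(results[:5])
    finally:
        pool.join()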

Example 6: map_async

# Required import: from multiprocessing.pool import Pool [as alias]
# Or: from multiprocessing.pool.Pool import map_async [as alias]
    def map_async(self, func, args=(), kwargs={}, callback=None):
        # args and kwargs are forwarded positionally to the native map_async as the
        # iterable and chunksize arguments; exceptions raised in func are logged
        results = NativePool.map_async(
            self, MultiprocessingLogExceptions(func),
            args, kwargs, callback)
        self.results.append(results)  # keep the AsyncResult (not iterable, so append rather than extend)
        return results
Author: cerdman, Project: openfda, Lines: 8, Source: multiprocessing_util.py


Note: The multiprocessing.pool.Pool.map_async examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.