

Python ProcessPoolExecutor.shutdown Method Code Examples

This article collects typical usage examples of the Python method concurrent.futures.ProcessPoolExecutor.shutdown. If you are unsure what ProcessPoolExecutor.shutdown does, or how to use it in practice, the curated examples below may help. You can also explore other usage examples of concurrent.futures.ProcessPoolExecutor, the class this method belongs to.


The following shows 15 code examples of ProcessPoolExecutor.shutdown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
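
Before diving in, a minimal sketch of the typical executor lifecycle may help orient readers; the function and names here are illustrative, not taken from any of the projects below:

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == '__main__':
    pool = ProcessPoolExecutor(max_workers=2)
    future = pool.submit(square, 7)   # run in a worker process
    print(future.result())            # blocks until the result is ready
    pool.shutdown(wait=True)          # wait for workers to finish, then reclaim them

After shutdown() returns, further submit() calls raise RuntimeError; shutdown(wait=True) is also what the `with ProcessPoolExecutor() as pool:` form calls on exit.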

Example 1: _Worker

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
class _Worker(object):
    def __init__(self, protocol=None):
        self.protocol = protocol
        self.pool = ProcessPoolExecutor(max_workers=1)
        self.pool.submit(id, 42).result()  # start the worker process

    def run(self, func, *args, **kwargs):
        """Synchronous remote function call"""

        input_payload = dumps((func, args, kwargs), protocol=self.protocol)
        result_payload = self.pool.submit(
            call_func, input_payload, self.protocol).result()
        result = loads(result_payload)

        if isinstance(result, BaseException):
            raise result
        return result

    def memsize(self):
        # Entries in the private _processes attribute may be Process
        # objects or bare PIDs, depending on the Python version.
        workers_pids = [p.pid if hasattr(p, "pid") else p
                        for p in list(self.pool._processes)]
        num_workers = len(workers_pids)
        if num_workers == 0:
            return 0
        elif num_workers > 1:
            raise RuntimeError("Unexpected number of workers: %d"
                               % num_workers)
        return psutil.Process(workers_pids[0]).memory_info().rss

    def close(self):
        self.pool.shutdown(wait=True)
Author: cloudpipe, Project: cloudpickle, Lines: 33, Source: testutils.py
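
A hypothetical usage sketch for the _Worker class above (it assumes the surrounding testutils module provides call_func, dumps, and loads, e.g. from cloudpickle, as the imports hint):

# Hypothetical usage; not part of the original testutils.py
worker = _Worker(protocol=2)
print(worker.run(sorted, [3, 1, 2]))   # executed in the worker process -> [1, 2, 3]
print(worker.memsize())                # RSS of the single worker process, in bytes
worker.close()                         # shutdown(wait=True) under the hood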

Example 2: on_message

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
 def on_message(self, message):
     print(len(message))
     result = yield tornado.gen.Task(self.process_message, message)
     return
     # NOTE: the code below is unreachable because of the bare `return`
     # above; it shows an alternative that offloads the work to a
     # process pool and shuts the pool down when done.
     pool = ProcessPoolExecutor()
     fut = pool.submit(call_process, message)
     ret = yield fut
     pool.shutdown()
Author: wallarelvo, Project: jammi, Lines: 10, Source: connection.py

Example 3: splice_gmaps

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def splice_gmaps(threadpool, tilefolder, tempfiles, name):
    processpool = ProcessPoolExecutor()
    caption = "Rendering Zoom Layers {}".format(name)
    loadingbar = Bar(caption=caption)
    loadingbar.set_progress(0, caption)
    pygame.display.update()

    side = 1600
    zoom_levels = 4
    factor = 2 ** (zoom_levels - 1)
    masterside = side * factor
    plates = generate_plate_coords(factor, tempfiles)

    master_surface = pygame.Surface((masterside, masterside))

    done = 0
    total = len(tempfiles) + len(plates) * sum((4 ** x for x in range(zoom_levels)))
    fraction = 100 / total

    def render_base_to_master(task):
        imgdata, size, location = task.result()
        tempsurf = pygame.image.frombuffer(imgdata, size, "RGB")
        master_surface.blit(tempsurf, location)

    tasks = []
    for masterpos, pieces in plates.items():
        master_surface.fill((132, 170, 248))

        for x, y in pieces:
            task = processpool.submit(unpack, tempfiles, x, y, ((x % factor) * side, (y % factor) * side))
            tasks.append(threadpool.submit(render_base_to_master, task))
            tasks.append(task)
        current_area = masterside

        for task in tasks:
            task.result()
            done += 0.5
            loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))
        for z in range(zoom_levels):
            tasks = []
            pieces = masterside // current_area
            x_off = masterpos[0] * pieces
            y_off = masterpos[1] * pieces
            for xp in range(pieces):
                for yp in range(pieces):
                    temp = pygame.Surface.subsurface(master_surface,
                                                     (xp * current_area, yp * current_area, current_area, current_area))
                    filename = "screen_{}_{}_{}.png".format(z + 1, x_off + xp, y_off + yp)
                    data = pygame.image.tostring(temp, "RGB")
                    tasks.append(processpool.submit(render_plate, data, tilefolder, temp.get_size(), side, filename))

            for task in tasks:
                task.result()
                done += 1
                loadingbar.set_progress(done * fraction, caption + " %4d of %4d" % (done, total))
            current_area //= 2
    processpool.shutdown()
Author: goofwear, Project: omnitool, Lines: 59, Source: render.py

Example 4: post

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
 def post(self):
     file = self.request.files['file'][0]
     hark.client.login()
     hark.client.createSession(default_hark_config)
     log.info("Uploading asynchrounously")
     pool = ProcessPoolExecutor(max_workers=2)
     future = pool.submit(async_upload, file)
     yield future
     pool.shutdown()
     log.info("Rendering visualization page")
     self.render('visualize.html')
Author: alepcat1710, Project: HarkVisualizer, Lines: 13, Source: harkvisualizer.py

Example 5: ConcurrentDownloader

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
class ConcurrentDownloader(BaseDownloader, ConcurrentMixin):
    """Concurrent ProcessPoolExecutor downloader

    :param pool_size: size of the ProcessPoolExecutor worker pool
    :param timeout: request timeout in seconds
    """
    def __init__(
            self, worker_class,
            worker_kwargs=None, pool_size=5, middlewares=None,):

        # configure executor
        self.pool_size = pool_size
        self.executor = ProcessPoolExecutor(max_workers=self.pool_size)

        # prepare worker params
        self.worker_params = {
            'worker_class': worker_class,
            'worker_kwargs': worker_kwargs or {},
        }

        # ctrl-c support for python2.x
        # trap sigint
        signal.signal(signal.SIGINT, lambda s, f: s)

        super(ConcurrentDownloader, self).__init__(
            middlewares=middlewares
        )

    def get(self, requests):

        for request in requests:
            # delegate request processing to the executor
            future = self.executor.submit(
                _run_download_worker, self.worker_params, request,
            )

            # build Planned object
            done_future = Planned()

            # when executor finish request - fire done_future
            future.add_done_callback(
                partial(self._done, request, done_future)
            )

            yield done_future

    def get_workers_count(self):
        return self.pool_size

    def stop(self):
        self.executor.shutdown()
Author: danielnaab, Project: pomp, Lines: 53, Source: concurrenttools.py
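
The stop() method above relies on shutdown()'s default wait=True, so it blocks until in-flight downloads complete. If a non-blocking stop were wanted, a sketch of a variant (not part of the pomp project) might expose the flag:

def stop(self, wait=True):
    # wait=False returns immediately; already-submitted futures still
    # complete in the background, but no new work can be submitted.
    self.executor.shutdown(wait=wait)

Python 3.9 additionally added a cancel_futures keyword for discarding queued work that has not started yet.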

Example 6: make_arch_db

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def make_arch_db():
    executor = ProcessPoolExecutor(max_workers=8)
    by = 10000
    m = 60000000
    #by = 2000
    #m = 10000
    e = executor.map(process_range, zip(range(0, m, by),range(by, m+by, by)))
    executor.shutdown()
    print('done calculating architectures')
    pfam_sets = merge(e)
    print(len(pfam_sets))
    gsave(pfam_sets,'pfam_sets.pkl.gz')
    
    # mongodb
    db = MongoClient('wl-cmadmin', 27017).ArchDB_Pfam_071414.ArchDB_Pfam_071414
    db.insert(map(lambda item: {'_id': min(item[1]), 'pID': list(item[1]), 'Pfam': item[0]}, pfam_sets.items()))
    db.ensure_index('pID')
    db.ensure_index('Pfam')
Author: mmayers12, Project: n15_mice, Lines: 20, Source: pfam_sets.py

Example 7: ProcessPoolEvaluator

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
class ProcessPoolEvaluator(SubmitEvaluator):
    
    def __init__(self, processes=None):
        try:
            from concurrent.futures import ProcessPoolExecutor
            self.executor = ProcessPoolExecutor(processes)
            super(ProcessPoolEvaluator, self).__init__(self.executor.submit)
            LOGGER.log(logging.INFO, "Started process pool evaluator")
            
            if processes:
                LOGGER.log(logging.INFO, "Using user-defined number of processes: %d", processes)
        except ImportError:
            # prevent error from showing in Eclipse if concurrent.futures not available
            raise
        
    def close(self):
        LOGGER.log(logging.DEBUG, "Closing process pool evaluator")
        self.executor.shutdown()
        LOGGER.log(logging.INFO, "Closed process pool evaluator")
Author: Project-Platypus, Project: Platypus, Lines: 21, Source: evaluator.py

Example 8: test_executor

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
    def test_executor(self):
        m = aioprocessing.AioManager()
        q = m.AioQueue()
        p = ProcessPoolExecutor(max_workers=1)
        val = 4
        def submit():
            yield p.submit(queue_put, q, val)
        next(submit())

        @asyncio.coroutine
        def queue_get():
            out = yield from q.coro_get()
            self.assertEqual(out, val)
            yield from q.coro_put(5)

        self.loop.run_until_complete(queue_get())
        returned = q.get()
        self.assertEqual(returned, 5)
        p.shutdown()
Author: croepha, Project: aioprocessing, Lines: 21, Source: queue_test.py

Example 9: infer_all

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def infer_all(db_name):
    db = pymongo.MongoClient('127.0.0.1', 27017, connect=False).get_database(db_name)
    executor = ProcessPoolExecutor(max_workers=10)

    futures = []
    for collection_name in db.collection_names():
        if not is_q_col(collection_name):
            continue
        tid = collection_name[:-2]
        q_collection = db[collection_name]
        a_collection = db[q_to_a(collection_name)]
        for q_doc in q_collection.find({}, {'qid':1, 'topic':1}):
            qid = q_doc['qid']
            aids = [a_doc['aid'] for a_doc in
                    a_collection.find({'qid': qid}, {'aid': 1})]
            futures.append(
                # submit() takes the callable and its arguments separately;
                # calling infer_question_task(...) inline would run it in this
                # process and submit its return value rather than the task.
                executor.submit(infer_question_task, db_name, tid, qid, aids)
            )

    executor.shutdown()
Author: Hank-wood, Project: Graduate, Lines: 22, Source: dynamic_infer.py
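
Note that the futures list above is filled but never drained; shutdown() merely blocks until every task has finished, so worker exceptions go unnoticed. A variant that surfaces per-task errors before shutting down (illustrative, not from the original project):

from concurrent.futures import as_completed

for future in as_completed(futures):
    try:
        future.result()            # re-raises any exception from the worker
    except Exception as exc:
        print('task failed:', exc)
executor.shutdown()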

Example 10: execute_parallel

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
    def execute_parallel(self, executor=None, loop=None):

        if executor is None:
            executor = ProcessPoolExecutor()
            shut_executor = True
        else:
            shut_executor = False

        if loop is None:
            loop = asyncio.get_event_loop()

        deps = self.graph.dependency_resolver()
        next_specs = deps.send(None)

        task = loop.create_task(self.submit_next_specs(loop, executor,
                                                    next_specs, deps))
        loop.run_until_complete(task)

        if shut_executor:
            executor.shutdown()
Author: Zaharid, Project: reportengine, Lines: 23, Source: resourcebuilder.py

Example 11: main

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def main(argv=None):
    usage = """REDCap Data Model Generator

    Usage:
        redcap dball <version> [--dir=DIR] [--db=DB] [--host=HOST] [--port=PORT] [--user=USER] [--pass=PASS] [--procs=PROCS]

    Options:
        -h --help       Show this screen.
        --dir=DIR       Name of the directory to output the files [default: .].
        --db=DB         Name of the REDCap database [default: redcap].
        --host=HOST     Host of the database server [default: localhost].
        --port=PORT     Port of the database server [default: 3306].
        --user=USER     Username to connect with.
        --pass=PASS     Password to connect with. If set to *, a prompt will be provided.
        --procs=PROCS   Number of processes to spawn [default: 24].

    """  # noqa

    from docopt import docopt

    args = docopt(usage, argv=argv, version='0.1')

    if args['--pass'] == '*':
        args['--pass'] = getpass('password: ')

    conn = db_connect(args['--db'],
                      args['--host'],
                      args['--port'],
                      args['--user'],
                      args['--pass'])

    project_names = db_projects(conn)

    pool = ProcessPoolExecutor(max_workers=int(args['--procs']))

    for name in project_names:
        pool.submit(worker, name, args)

    pool.shutdown()
Author: chop-dbhi, Project: data-models-generator, Lines: 41, Source: rc_all.py
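
Since ProcessPoolExecutor is a context manager, the submission loop above could equivalently be written without the explicit shutdown(); a sketch:

with ProcessPoolExecutor(max_workers=int(args['--procs'])) as pool:
    for name in project_names:
        pool.submit(worker, name, args)
# __exit__ calls pool.shutdown(wait=True) automatically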

Example 12: infer_many

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def infer_many(db_name, filename):
    """
    推断一些问题的回答, 读取文件, 每一行格式为
    topic,qid,...(后面是什么无所谓)
    """
    db = pymongo.MongoClient('127.0.0.1', 27017, connect=False).get_database(db_name)
    executor = ProcessPoolExecutor(max_workers=5)

    count = 0
    futures = []
    with open(filename) as f:
        for line in f:
            tid, qid, _ = line.split(',', maxsplit=2)
            a_collection = db[a_col(tid)]
            aids = [a_doc['aid'] for a_doc in
                    a_collection.find({'qid': qid}, {'aid': 1})]
            futures.append(
                executor.submit(infer_question_task, db_name, tid, qid, aids)
            )
            count += len(aids)

    print(count)
    executor.shutdown()
Author: Hank-wood, Project: Graduate, Lines: 25, Source: dynamic_infer.py

Example 13: Main

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def Main():
  global gSymFileManager, gOptions, gPool

  if not ReadConfigFile():
    return 1

  # In a perfect world, we could create a process per cpu core.
  # But then we'd have to deal with cache sharing
  gPool = Pool(1)
  gPool.submit(initializeSubprocess, gOptions)

  # Setup logging in the parent process.
  # Ensure this is called after the call to initializeSubprocess to
  # avoid duplicate messages in Unix systems.
  SetLoggingOptions(gOptions["Log"])

  LogMessage("Starting server with the following options:\n" + str(gOptions))

  app = Application([
    url(r'/(debug)', DebugHandler),
    url(r'/(nodebug)', DebugHandler),
    url(r"(.*)", SymbolHandler)])

  app.listen(gOptions['portNumber'], gOptions['hostname'])

  try:
    # select on Windows doesn't return on ctrl-c, add a periodic
    # callback to make ctrl-c responsive
    if sys.platform == 'win32':
        PeriodicCallback(lambda: None, 100).start()
    IOLoop.current().start()
  except KeyboardInterrupt:
    LogMessage("Received SIGINT, stopping...")

  gPool.shutdown()
  LogMessage("Server stopped - " + gOptions['hostname'] + ":" + str(gOptions['portNumber']))
  return 0
Author: ddurst, Project: Snappy-Symbolication-Server, Lines: 39, Source: symbolicationWebService.py

Example 14: DataRouter

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
class DataRouter(object):
    def __init__(self,
                 project_dir=None,
                 max_training_processes=1,
                 response_log=None,
                 emulation_mode=None,
                 remote_storage=None,
                 component_builder=None):
        self._training_processes = max(max_training_processes, 1)
        self._current_training_processes = 0
        self.responses = self._create_query_logger(response_log)
        self.project_dir = config.make_path_absolute(project_dir)
        self.emulator = self._create_emulator(emulation_mode)
        self.remote_storage = remote_storage

        if component_builder:
            self.component_builder = component_builder
        else:
            self.component_builder = ComponentBuilder(use_cache=True)

        self.project_store = self._create_project_store(project_dir)
        self.pool = ProcessPool(self._training_processes)

    def __del__(self):
        """Terminates workers pool processes"""
        self.pool.shutdown()

    @staticmethod
    def _create_query_logger(response_log):
        """Create a logger that will persist incoming query results."""

        # Ensures different log files for different
        # processes in multi worker mode
        if response_log:
            # We need to generate a unique file name,
            # even in multiprocess environments
            timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
            log_file_name = "rasa_nlu_log-{}-{}.log".format(timestamp,
                                                            os.getpid())
            response_logfile = os.path.join(response_log, log_file_name)
            # Instantiate a standard python logger,
            # which we are going to use to log requests
            utils.create_dir_for_file(response_logfile)
            out_file = io.open(response_logfile, 'a', encoding='utf8')
            query_logger = Logger(
                    observer=jsonFileLogObserver(out_file, recordSeparator=''),
                    namespace='query-logger')
            # Prevents queries getting logged with parent logger
            # --> might log them to stdout
            logger.info("Logging requests to '{}'.".format(response_logfile))
            return query_logger
        else:
            # If the user didn't provide a logging directory, we won't log!
            logger.info("Logging of requests is disabled. "
                        "(No 'request_log' directory configured)")
            return None

    def _collect_projects(self, project_dir):
        if project_dir and os.path.isdir(project_dir):
            projects = os.listdir(project_dir)
        else:
            projects = []

        projects.extend(self._list_projects_in_cloud())
        return projects

    def _create_project_store(self, project_dir):
        projects = self._collect_projects(project_dir)

        project_store = {}

        for project in projects:
            project_store[project] = Project(self.component_builder,
                                             project,
                                             self.project_dir,
                                             self.remote_storage)

        if not project_store:
            default_model = RasaNLUModelConfig.DEFAULT_PROJECT_NAME
            project_store[default_model] = Project(
                    project=RasaNLUModelConfig.DEFAULT_PROJECT_NAME,
                    project_dir=self.project_dir,
                    remote_storage=self.remote_storage)
        return project_store

    def _pre_load(self, projects):
        logger.debug("loading %s", projects)
        for project in self.project_store:
            if project in projects:
                self.project_store[project].load_model()

    def _list_projects_in_cloud(self):
        try:
            from rasa_nlu.persistor import get_persistor
            p = get_persistor(self.remote_storage)
            if p is not None:
                return p.list_projects()
            else:
                return []
        except Exception:
#......... remainder of this code omitted .........
Author: githubclj, Project: rasa_nlu, Lines: 103, Source: data_router.py
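
Relying on __del__ to call pool.shutdown(), as DataRouter does, is fragile: __del__ may run late or not at all (for example, in reference cycles or during interpreter teardown). A more deterministic alternative, sketched here with a simplified stand-in class rather than rasa_nlu's actual API, registers the shutdown explicitly:

import atexit
from concurrent.futures import ProcessPoolExecutor

class Router:
    """Simplified stand-in for DataRouter, for illustration only."""
    def __init__(self, workers=1):
        self.pool = ProcessPoolExecutor(max_workers=workers)
        # Guarantee the pool is shut down at interpreter exit.
        atexit.register(self.pool.shutdown, wait=True)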

Example 15: main

# Required import: from concurrent.futures import ProcessPoolExecutor [as alias]
# Or: from concurrent.futures.ProcessPoolExecutor import shutdown [as alias]
def main(vcf, covariates, formula, min_qual, min_genotype_qual, min_samples,
        weighted=False, as_vcf=False, exclude_nan=False, groups=None):
    #if weighted == "FALSE": weighted = False
    #else:
    #    weight_fn = {'log10': np.log10, 'log': np.log, 'GQ': np.array}[weighted]
    if covariates.endswith('.csv'):
        covariate_df = pd.read_csv(covariates, index_col=0)
    else:
        covariate_df = pd.read_table(covariates, index_col=0)
    covariate_df.index = [str(x) for x in covariate_df.index]
    gmatrix = {}

    if groups == 'covariance':
        assert op.isfile(vcf), ('need to iterate over vcf 2x')
        cov = get_covariance(_get_genotypes(vcf, min_qual,
                                min_genotype_qual, min_samples, as_vcf))
        groups = pd.DataFrame(cov, index=covariate_df.index,
                columns=covariate_df.index)
        print(groups)
        # NOTE: currently using GLS and a covariance matrix but we assume
        # a binary dependent variable so estimates are off.

    po = ProcessPoolExecutor(1)

    for i, (samples, genos, quals, variant) in enumerate(
            _get_genotypes(vcf, min_qual, min_genotype_qual, min_samples,
                           as_vcf)):
        if i == 0 and samples is not None:
            # make sure we have covariates for all samples in the vcf
            assert not set(samples).difference(covariate_df.index),\
                        set(samples).difference(covariate_df.index)
            covariate_df = covariate_df.ix[samples,:]
        covariate_df['genotype'] = genos

        if samples is None:
            if exclude_nan: continue
            res = {'OR': np.nan, 'pvalue': np.nan, 'z': np.nan, 'OR_CI':
                    (np.nan, np.nan), 'xtab': 'NA'}
        else:
            xtab_future = po.submit(xtab, formula, covariate_df)
            try:
                res = vcfassoc(formula, covariate_df, groups)
                gmatrix['{CHROM}:{POS}'.format(**variant)] = genos
            except np.linalg.linalg.LinAlgError:
                res = {'OR': np.nan, 'pvalue': np.nan, 'z': np.nan, 'OR_CI':
                        (np.nan, np.nan)}
            except statsmodels.tools.sm_exceptions.PerfectSeparationError:
                print("WARNING: perfect separation, too few samples(?)",
                      ": setting to -9: {CHROM}:{POS}".format(**variant),
                      file=sys.stderr)
                res = {}
                res['z'] = res['OR'] = np.nan
                res['pvalue'] = -9.0 # blech.
                res['OR_CI'] = np.nan, np.nan
                gmatrix['{CHROM}:{POS}'.format(**variant)] = genos
            except IndexError:
                continue
            res['xtab'] = xtab_future.result()
            #res['xtab'] = xtab(formula, covariate_df)
        print_result(res, variant, as_vcf, i)

    l1_regr(pd.DataFrame(gmatrix), covariate_df, formula)
    po.shutdown()
Author: brentp, Project: vcfassoc, Lines: 65, Source: vcfassoc.py


Note: the concurrent.futures.ProcessPoolExecutor.shutdown examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's License before distributing or using the code. Do not reproduce without permission.