

Python pool.terminate Code Examples

This article collects typical usage examples of terminate from Python's multiprocessing.pool module. Note that terminate is a method on pool objects (multiprocessing.pool.Pool and ThreadPool), not a module-level function. If you are wondering what terminate does, how to call it, or what real-world uses look like, the curated examples below should help.


The 15 code examples of terminate shown below are ordered by popularity.
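
As a quick baseline before the examples: close() stops a pool from accepting new work and lets queued tasks finish, while terminate() stops the workers immediately without waiting for outstanding work; both should be followed by join(). A minimal sketch of the common pattern (the square worker is hypothetical, not taken from any example below):

import multiprocessing.pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.pool.Pool(processes=4)
    try:
        results = pool.map(square, range(10))
        pool.close()       # normal shutdown: no new tasks, finish queued work
    except Exception:
        pool.terminate()   # abort: stop workers immediately
        raise
    finally:
        pool.join()        # wait for the worker processes to exit
    print(results)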

Example 1: ScopedPool

@contextlib.contextmanager  # decorator present in the original git_common.py
def ScopedPool(*args, **kwargs):
  """Context manager which yields a multiprocessing.pool instance and
  correctly deals with thrown exceptions.

  *args - Arguments to multiprocessing.pool

  Kwargs:
    kind ('threads', 'procs') - The type of underlying coprocess to use.
    **etc - Arguments to multiprocessing.pool
  """
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = _ScopedPool_initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
Author: duanwujie, Project: depot_tools, Lines: 26, Source: git_common.py
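
Because ScopedPool is a generator-based context manager, callers use it with a with-statement. A hedged usage sketch (the kind='threads' path, which avoids the module-level _ScopedPool_initer helper):

with ScopedPool(4, kind='threads') as pool:
    print(pool.map(str, range(5)))   # ['0', '1', '2', '3', '4']
# Normal exit: close() then join(). If the body raises: terminate(),
# join(), and the exception propagates.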

Example 2: finalize

    def finalize(self):
        pool = self._pool

        # Detach the pool from the iterator first, then terminate the local
        # reference, so other code never sees a half-shut-down pool.
        self._next = None
        self._pool = None
        if pool is not None:
            pool.terminate()
Author: ktnyt, Project: chainer, Lines: 7, Source: multithread_iterator.py

Example 3: test_no_thread_pool

def test_no_thread_pool():
    pool = xmon_stepper.ThreadlessPool()
    result = pool.map(lambda x: x + 1, range(10))
    assert result == [x + 1 for x in range(10)]
    # No-ops: the threadless pool has nothing to terminate or join.
    pool.terminate()
    pool.join()
Author: google2013, Project: Cirq, Lines: 7, Source: xmon_stepper_test.py
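
The test assumes ThreadlessPool exposes the same surface as a real pool while running everything serially, so terminate() and join() can be safe no-ops. A minimal sketch of such a drop-in class (an assumption about its shape, not Cirq's actual implementation):

class SerialPool(object):
    """Pool-compatible stand-in that runs map() serially in-process."""

    def map(self, func, iterable):
        return [func(x) for x in iterable]

    def terminate(self):
        pass   # no worker threads to kill

    def join(self):
        pass   # nothing to wait for

pool = SerialPool()
assert pool.map(lambda x: x + 1, range(10)) == [x + 1 for x in range(10)]
pool.terminate()
pool.join()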

Example 4: system_status

def system_status():
    from status_checks import run_checks
    class WebOutput:
        def __init__(self):
            self.items = []

        def add_heading(self, heading):
            self.items.append({"type": "heading", "text": heading, "extra": []})

        def print_ok(self, message):
            self.items.append({"type": "ok", "text": message, "extra": []})

        def print_error(self, message):
            self.items.append({"type": "error", "text": message, "extra": []})

        def print_warning(self, message):
            self.items.append({"type": "warning", "text": message, "extra": []})

        def print_line(self, message, monospace=False):
            self.items[-1]["extra"].append({"text": message, "monospace": monospace})

    output = WebOutput()
    # Create a temporary pool of processes for the status checks
    pool = multiprocessing.pool.Pool(processes=5)
    run_checks(False, env, output, pool)
    pool.terminate()
    return json_response(output.items)
Author: jkaberg, Project: mailinabox, Lines: 27, Source: daemon.py
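
The handler owns a short-lived pool: create it, hand it to run_checks, then terminate() so worker processes never outlive the request. Wrapping the call in try/finally makes the teardown robust even when a check raises (a hedged variant, not the project's code):

pool = multiprocessing.pool.Pool(processes=5)
try:
    run_checks(False, env, output, pool)
finally:
    pool.terminate()   # always reap the workers, even on error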

Example 5: _run_tests

  def _run_tests(self):
    pool = multiprocessing.pool.ThreadPool(processes=self.suite_concurrency)
    outstanding_suites = []
    for suite in self.suite_runners:
      suite.task = pool.apply_async(suite.run)
      outstanding_suites.append(suite)

    ret = True
    try:
      while len(outstanding_suites) > 0:
        for suite in list(outstanding_suites):
          if suite.timed_out():
            msg = "Task %s not finished within timeout %s" % (suite.name,
                suite.suite.timeout_minutes,)
            logging.error(msg)
            raise Exception(msg)
          task = suite.task
          if task.ready():
            this_task_ret = task.get()
            outstanding_suites.remove(suite)
            if this_task_ret:
              logging.info("Suite %s succeeded.", suite.name)
            else:
              logging.info("Suite %s failed.", suite.name)
              ret = False
        time.sleep(5)
    except KeyboardInterrupt:
      logging.info("\n\nDetected KeyboardInterrupt; shutting down!\n\n")
      raise
    finally:
      pool.terminate()
    return ret
Author: twmarshall, Project: Impala, Lines: 32, Source: test-with-docker.py
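
Stripped of the suite bookkeeping, the control flow is: submit with apply_async, poll ready()/get() in a loop, and terminate() in a finally block so a KeyboardInterrupt still reaps the workers. A condensed, self-contained sketch:

import time
import multiprocessing.pool

def work(n):
    time.sleep(n * 0.1)
    return n % 2 == 0

pool = multiprocessing.pool.ThreadPool(processes=4)
tasks = [pool.apply_async(work, (n,)) for n in range(4)]
try:
    while tasks:
        for task in list(tasks):
            if task.ready():
                print('result: %s' % task.get())
                tasks.remove(task)
        time.sleep(0.1)
finally:
    pool.terminate()   # also kills stragglers on KeyboardInterrupt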

Example 6: _create_process_pool

@contextlib.contextmanager  # decorator present in the original smart_open source
def _create_process_pool(processes=1):
    if _MULTIPROCESSING and processes:
        logger.info("creating pool with %i workers", processes)
        pool = multiprocessing.pool.Pool(processes=processes)
    else:
        logger.info("creating dummy pool")
        pool = DummyPool()
    yield pool
    pool.terminate()
Author: mpenkov, Project: smart_open, Lines: 9, Source: s3.py
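
With the @contextlib.contextmanager decorator shown above, callers get the terminate() for free when the with-block exits normally. Note that because the yield is not wrapped in try/finally, an exception raised inside the with-body propagates at the yield and skips the terminate() line. A hedged usage sketch (do_work and items are hypothetical):

with _create_process_pool(processes=4) as pool:
    results = pool.map(do_work, items)   # hypothetical worker and inputs
# pool.terminate() has run by this point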

Example 7: s3_iter_bucket

def s3_iter_bucket(bucket, prefix="", accept_key=lambda key: True, key_limit=None, workers=16):
    """
    Iterate and download all S3 files under `bucket/prefix`, yielding out
    `(key, key content)` 2-tuples (generator).

    `accept_key` is a function that accepts a key name (unicode string) and
    returns True/False, signalling whether the given key should be downloaded out or
    not (default: accept all keys).

    If `key_limit` is given, stop after yielding out that many results.

    The keys are processed in parallel, using `workers` processes (default: 16),
    to greatly speed up downloads. If multiprocessing is unavailable
    (i.e. NO_MULTIPROCESSING is True), this parameter is ignored.

    Example::

      >>> mybucket = boto.connect_s3().get_bucket('mybucket')

      >>> # get all JSON files under "mybucket/foo/"
      >>> for key, content in s3_iter_bucket(mybucket, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
      ...     print key, len(content)

      >>> # limit to 10k files, using 32 parallel workers (default is 16)
      >>> for key, content in s3_iter_bucket(mybucket, key_limit=10000, workers=32):
      ...     print key, len(content)

    """
    total_size, key_no = 0, -1
    keys = (key for key in bucket.list(prefix=prefix) if accept_key(key.name))

    if NO_MULTIPROCESSING:
        logger.info("iterating over keys from %s without multiprocessing" % bucket)
        iterator = imap(s3_iter_bucket_process_key, keys)
    else:
        logger.info("iterating over keys from %s with %i workers" % (bucket, workers))
        pool = multiprocessing.pool.Pool(processes=workers)
        iterator = pool.imap_unordered(s3_iter_bucket_process_key, keys)

    for key_no, (key, content) in enumerate(iterator):
        if key_no % 1000 == 0:
            logger.info(
                "yielding key #%i: %s, size %i (total %.1fMB)" % (key_no, key, len(content), total_size / 1024.0 ** 2)
            )

        yield key, content
        key.close()
        total_size += len(content)

        if key_limit is not None and key_no + 1 >= key_limit:
            # we were asked to output only a limited number of keys => we're done
            break

    if not NO_MULTIPROCESSING:
        pool.terminate()

    logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))
Author: val314159, Project: smart_open, Lines: 57, Source: smart_open_lib.py

Example 8: buildList

    def buildList(self):
        """
        Build the artifact "list" from sources defined in the given configuration.

        :returns: Dictionary described above.
        """
        priority = 0
        pool_dict = {}

        for source in self.configuration.artifactSources:
            priority += 1
            pool = pool_dict.setdefault(source['type'], ThreadPool(self.MAX_THREADS_DICT[source['type']]))
            pool.apply_async(self._read_artifact_source, args=[source, priority],
                             callback=self._add_result)

        for pool in pool_dict.values():
            pool.close()

        at_least_1_runs = True
        all_keys = range(1, len(self.configuration.artifactSources) + 1)
        finished = False
        while at_least_1_runs:
            for i in range(30):
                time.sleep(1)

                if not self.errors.empty():
                    for pool in pool_dict.values():
                        logging.debug("Terminating pool %s", str(pool))
                        pool.terminate()
                    finished = True
                    break

            at_least_1_runs = False
            if not finished:
                self.results_lock.acquire()
                # Use a separate name so the boolean `finished` flag is not
                # clobbered by the list of completed priorities.
                finished_keys = sorted(list(self.results.keys()))
                self.results_lock.release()
                if all_keys != finished_keys:
                    logging.debug("Still waiting for priorities %s to finish",
                                  str(list(set(all_keys) - set(finished_keys))))
                    at_least_1_runs = True

        for pool in pool_dict.values():
            if pool._state != multiprocessing.pool.TERMINATE:
                pool.join()

        if not self.errors.empty():
            raise RuntimeError("%i error(s) occurred during reading of artifact list." % self.errors.qsize())

        return self._get_artifact_list()
Author: jboss-eap, Project: maven-repository-builder, Lines: 49, Source: artifact_list_builder.py

Example 9: thread_pool

@contextlib.contextmanager  # decorator present in the original cgcloud source
def thread_pool( size ):
    """
    A context manager that yields a thread pool of the given size. On normal closing,
    this context manager closes the pool and joins all threads in it. On exceptions, the pool
    will be terminated but threads won't be joined.
    """
    pool = multiprocessing.pool.ThreadPool( processes=size )
    try:
        yield pool
    except:
        pool.terminate( )
        raise
    else:
        pool.close( )
        pool.join( )
Author: arkal, Project: cgcloud, Lines: 15, Source: util.py
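
Usage mirrors the docstring's contract: join on success, terminate without join on failure. A hedged sketch (fetch and urls are hypothetical):

with thread_pool(8) as pool:
    results = pool.map(fetch, urls)   # hypothetical worker and inputs
# Clean exit: close() + join(). Exception: terminate() and re-raise.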

Example 10: get_builds

def get_builds(db, jobs_dir, metadata, threads, client_class):
    """
    Loads build information from GCS and inserts it into the database.

    Args:
        db: database object used to store the build rows.
        jobs_dir: the GCS path containing jobs.
        metadata: a dict of metadata about the jobs_dir.
        threads: how many threads to use to download build information.
        client_class: a constructor for a GCSClient (or a subclass).
    """
    gcs = client_class(jobs_dir, metadata)

    print('Loading builds from %s' % jobs_dir)
    sys.stdout.flush()

    builds_have = db.get_existing_builds(jobs_dir)
    print('already have %d builds' % len(builds_have))
    sys.stdout.flush()

    jobs_and_builds = gcs.get_builds(builds_have)
    pool = None
    if threads > 1:
        pool = multiprocessing.Pool(threads, mp_init_worker,
                                    (jobs_dir, metadata, client_class))
        builds_iterator = pool.imap_unordered(
            get_started_finished, jobs_and_builds)
    else:
        global WORKER_CLIENT  # pylint: disable=global-statement
        WORKER_CLIENT = gcs
        builds_iterator = (
            get_started_finished(job_build) for job_build in jobs_and_builds)

    try:
        for n, (build_dir, started, finished) in enumerate(builds_iterator):
            print(build_dir)
            if started or finished:
                db.insert_build(build_dir, started, finished)
            if n % 200 == 0:
                db.commit()
    except KeyboardInterrupt:
        if pool:
            pool.terminate()
        raise
    else:
        if pool:
            pool.close()
            pool.join()
    db.commit()
Author: Kashomon, Project: test-infra, Lines: 48, Source: make_db.py

Example 11: ScopedPool

@contextlib.contextmanager  # decorator present in the original common.py
def ScopedPool(*args, **kwargs):
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
Author: riannucci, Project: vimified, Lines: 17, Source: common.py

Example 12: list_all_machines

def list_all_machines(cloud_ids, headers):
    "Given the cloud ids, runs in parallel queries to get all machines"
    def list_one_cloud(cloud_id):
        cloud_machines = requests.get('https://mist.io/clouds/%s/machines' % cloud_id, headers=headers)
        if cloud_machines.status_code == 200:
            machines = cloud_machines.json()
            for machine in machines:
                machine['cloud'] = cloud_id
            return machines
        return []

    pool = multiprocessing.pool.ThreadPool(8)
    results = pool.map(list_one_cloud, cloud_ids)
    pool.terminate()

    machines = []
    for result in results:
        machines.extend(result)
    return machines
Author: mistio, Project: mist.io-api-examples, Lines: 19, Source: list_machines.py
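
Calling terminate() right after map() is safe here because map() is synchronous: every result is already in hand, so no work can be cut short. A self-contained illustration:

import multiprocessing.pool

pool = multiprocessing.pool.ThreadPool(8)
results = pool.map(len, ['a', 'bb', 'ccc'])
pool.terminate()   # nothing is lost: map() already returned [1, 2, 3]
pool.join()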

Example 13: master_progress

    def master_progress(mpu, num_processes, bucket, upload_list):
        x = 0
        print "proc = ?? " + str(num_processes)
        while True:
            try:
                if x != num_parts:
                    pool = NoDaemonProcessPool(processes=num_processes)
                    value = pool.map_async(
                        do_part_upload,
                        gen_args(x, fold_last, upload_list)).get(99999999)
                que.put(value)
                src.close()
                mpu.complete_upload()
                logger.error(str(src.name) + " stop ")
                print "mpu.complete src name " + src.name
                lock.acquire()
                status_list[FileList.index(uploadFileNames)] = 'finish'
                print src.name + " finish  " + str(status_list)
                critical_threadnum(Total_Threadnum, Threadnum, num_processes)
                print uploadFileNames + " add back now is   " + str(Threadnum.value)
                lock.release()
                src.close()
                return value
            except KeyboardInterrupt:
                logger.warn("Received KeyboardInterrupt, canceling upload")
                pool.terminate()
                mpu.cancel_upload()
                break
            except IOError:
                break
            except Exception, err:
                logger.error("Encountered an error, canceling upload")
                print src.name
                logger.error(str(src.name) + str(err))
Author: zgbsfs, Project: storage, Lines: 42, Source: s3-mp-upload.py

Example 14: _list_machines

    def _list_machines(self):
        self._machines = []
        backends = self.backends()
        # show only enabled backends
        # enabled_backends = [backend for backend in backends if backend.enabled]

        def _list_one(backend):
            machines = []
            try:
                machines = backend.machines()
            except:
                # could be a cloud with expired creds, so don't fail
                pass
            return machines

        pool = multiprocessing.pool.ThreadPool(10)
        results = pool.map(_list_one, backends)
        pool.terminate()
        for result in results:
            self._machines.extend(result)
Author: vkefallinos, Project: mist.client, Lines: 20, Source: __init__.py

Example 15: eval


#......... (part of the code omitted here) .........

            # Reset '_'
            globals_['_'] = locals_['_'] = None

        if globals_.get('__IN_EVAL__', None) is None:
            globals_['__IN_EVAL__'] = True

        result = _run(graph, root_nodes[0], globals_, locals_, {}, None, False)

        # 1
        for expr_return_value in result:
            globals_values.append(globals_)
            locals_values.append(locals_)
            return_values.append([expr_return_value])

        # N-1
        for (task_result, task_locals_, task_globals_) in tasks:
            return_values.append(task_result.get())
            locals_values.append(task_locals_)
            globals_values.append(task_globals_)

        # Reduce + _PythonectResult grouping
        for item in return_values:
            # Is there a _PythonectResult in the item list?
            for sub_item in item:
                if isinstance(sub_item, _PythonectResult):
                    # 1st time?
                    if sub_item.values['node'] not in reduces:
                        reduces[sub_item.values['node']] = []
                        # Add a placeholder to mark the position in the return value list
                        return_value.append(_PythonectLazyRunner(sub_item.values['node']))
                    reduces[sub_item.values['node']] = reduces[sub_item.values['node']] + [sub_item.values]
                else:
                    return_value.append(sub_item)

        # Any _PythonectLazyRunners?
        if reduces:
            for return_item_idx in xrange(0, len(return_value)):
                if isinstance(return_value[return_item_idx], _PythonectLazyRunner):
                    # Swap list[X] with list[X.go(reduces)]
                    return_value[return_item_idx] = pool.apply_async(
                        return_value[return_item_idx].go, args=(graph, reduces))
            return_value = __resolve_and_merge_results(return_value)

        # [...] ?
        if return_value:
            # Single return value? (e.g. [1])
            if len(return_value) == 1:
                return_value = return_value[0]

            # Update globals_ and locals_
#            globals_, locals_ = __merge_all_globals_and_locals(globals_, locals_, globals_values, {}, locals_values, {})

        # Set `return value` as `_`
        globals_['_'] = locals_['_'] = return_value

        if globals_.get('__IN_EVAL__', None) is not None:
            del globals_['__IN_EVAL__']

        pool.close()
        pool.join()
        pool.terminate()

    return return_value
Author: ESSL-CQQ, Project: pythonect, Lines: 101, Source: eval.py


Note: The multiprocessing.pool terminate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and any use or distribution must comply with each project's License. Do not reproduce without permission.