

Python multiprocessing.Event Code Examples

This article collects typical usage examples of multiprocessing.Event in Python. If you are wondering what multiprocessing.Event does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the multiprocessing module.


The following presents 15 code examples of multiprocessing.Event, ordered by popularity by default.
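Before diving into the collected examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the typical multiprocessing.Event pattern: the parent process sets the event to tell a child process to stop.

import multiprocessing
import time

def worker(stop_event):
    # Loop until the parent signals shutdown via the shared Event.
    while not stop_event.is_set():
        time.sleep(0.1)

if __name__ == '__main__':
    stop_event = multiprocessing.Event()
    proc = multiprocessing.Process(target=worker, args=(stop_event,))
    proc.start()
    time.sleep(0.5)
    stop_event.set()   # signal the worker to exit
    proc.join()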

Example 1: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : Multiprocessing queue
        data_paths : list of (data, label) pairs used to load data
        repeat : if True, keep returning data until exit is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds() 
Author: chrischoy, Project: 3D-R2N2, Lines: 19, Source file: data_process.py

Example 2: start_root_lock_manager

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def start_root_lock_manager(self, uuid):
        """Starts a thread that keeps a lock open."""
        if self.single_threaded_mode:
            return

        logging.debug("starting lock manager for {}".format(uuid))

        # we use this event for a controlled shutdown
        self.lock_manager_control_event = threading.Event()

        # start a thread that sends keep alives every N seconds
        self.lock_keepalive_thread = threading.Thread(target=self.root_lock_manager_loop,
                                                           name="Lock Manager ({})".format(uuid),
                                                           args=(uuid,))
        self.lock_keepalive_thread.daemon = True # we want this thread to die if the process dies
        self.lock_keepalive_thread.start() 
Author: IntegralDefense, Project: ACE, Lines: 18, Source file: __init__.py

Example 3: test_writer_blocks_reader

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def test_writer_blocks_reader(self):
        with self.lock.for_write():
            event = multiprocessing.Event()

            def test():
                self.assert_writer()

                # Caller will block until this event is released.
                event.set()

                with self.lock.for_read():
                    self.assert_readers(1)
                    return 'read'

            r = self.async(test)

            # Wait until we can confirm that the reader is locked out.
            event.wait()
            self.assert_writer()

        self.assertEqual('read', self.get_result(r))
        self.assert_unlocked() 
Author: bslatkin, Project: ringbuffer, Lines: 24, Source file: test_ringbuffer.py

Example 4: test_wait_for_write

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def test_wait_for_write(self):
        event = multiprocessing.Event()
        wait_count = 0

        with self.lock.for_read():

            def test():
                with self.lock.for_write():
                    self.assert_writer()
                    event.set()
                    return 'written'

            writer = self.async(test)

            while not event.is_set():
                self.assert_readers(1)
                wait_count += 1
                self.lock.wait_for_write()
                self.assert_readers(1)

        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
        self.assertLessEqual(wait_count, 2) 
Author: bslatkin, Project: ringbuffer, Lines: 25, Source file: test_ringbuffer.py

Example 5: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def __init__(self, id, args, worker_address, sink_address):
        super().__init__()
        self.model_dir = args.model_dir
        self.config_fp = os.path.join(self.model_dir, 'bert_config.json')
        self.checkpoint_fp = os.path.join(self.model_dir, 'bert_model.ckpt')
        self.vocab_fp = os.path.join(args.model_dir, 'vocab.txt')
        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_fp)
        self.max_seq_len = args.max_seq_len
        self.worker_id = id
        self.daemon = True
        self.model_fn = model_fn_builder(
            bert_config=modeling.BertConfig.from_json_file(self.config_fp),
            init_checkpoint=self.checkpoint_fp,
            pooling_strategy=args.pooling_strategy,
            pooling_layer=args.pooling_layer
        )
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.worker_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = args.gpu_memory_fraction
        self.estimator = Estimator(self.model_fn, config=RunConfig(session_config=config))
        self.exit_flag = multiprocessing.Event()
        self.logger = set_logger('WORKER-%d' % self.worker_id)
        self.worker_address = worker_address
        self.sink_address = sink_address 
Author: a414351664, Project: Bert-TextClassification, Lines: 27, Source file: server.py

Example 6: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def __init__(self, id, args, worker_address_list, sink_address, device_id, graph_path, graph_config):
        super().__init__()
        self.worker_id = id
        self.device_id = device_id
        self.logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'), args.verbose)
        self.max_seq_len = args.max_seq_len
        self.do_lower_case = args.do_lower_case
        self.mask_cls_sep = args.mask_cls_sep
        self.daemon = True
        self.exit_flag = multiprocessing.Event()
        self.worker_address = worker_address_list
        self.num_concurrent_socket = len(self.worker_address)
        self.sink_address = sink_address
        self.prefetch_size = args.prefetch_size if self.device_id > 0 else None  # set to zero for CPU-worker
        self.gpu_memory_fraction = args.gpu_memory_fraction
        self.model_dir = args.model_dir
        self.verbose = args.verbose
        self.graph_path = graph_path
        self.bert_config = graph_config
        self.use_fp16 = args.fp16
        self.show_tokens_to_client = args.show_tokens_to_client
        self.no_special_token = args.no_special_token
        self.is_ready = multiprocessing.Event() 
Author: hanxiao, Project: bert-as-service, Lines: 25, Source file: __init__.py

Example 7: task

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def task():
    """Create a task to test the validators.

    """
    class Tester(CheckTask):
        """Class for testing feval validators.

        """
        feval = Str()

    root = RootTask(should_stop=Event(), should_pause=Event())
    task = Tester(name='test', database_entries={'val': 1})
    loop = LoopTask(name='Loop', task=task)
    root.add_child_task(0, loop)
    yield task
    del root.should_pause
    del root.should_stop
    gc.collect() 
Author: Exopy, Project: exopy, Lines: 20, Source file: test_validators.py

Example 8: flush_queue

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def flush_queue(period, run_sig,
                result_queue, result_list):
    """
    For flushing the queue periodically to a list so it doesn't fill up.

    Args:
        period: flush the result_queue to result_list every period seconds
        run_sig: terminate when the Event run_sig is cleared.
        result_queue: the queue in which results accumulate before being flushed
        result_list: the final list of results.
    """
    while run_sig.is_set():
        time.sleep(period)
        while not result_queue.empty():
            item = result_queue.get()
            result_list.append(item)


# Main Body 
Author: nevillegrech, Project: MadMax, Lines: 21, Source file: analyse.py
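For context only, here is a hedged usage sketch (not part of the MadMax source; the period value and the producer side are assumptions) showing how flush_queue is typically driven from a background thread and stopped by clearing run_sig:

import multiprocessing
import threading

run_sig = multiprocessing.Event()
run_sig.set()                      # flusher keeps running while this is set
result_queue = multiprocessing.Queue()
results = []

flusher = threading.Thread(target=flush_queue,
                           args=(1.0, run_sig, result_queue, results))
flusher.start()
# ... worker processes put items into result_queue here ...
run_sig.clear()                    # ask the flusher thread to terminate
flusher.join()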

Example 9: start

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def start(self):
        parent_conn, child_conn = multiprocessing.Pipe()
        event = multiprocessing.Event()

        self.__conn = parent_conn
        self.__run_process = multiprocessing.Process(target=self.run, args=(child_conn, event))
        self.__run_process.start()

        # To avoid defunct process
        self.__join_thread = threading.Thread(target=self.wait)
        self.__join_thread.start()

        # If there is no sleep here, CommonProcess will be terminated with exit code SIGSEGV.
        # It may be a Python bug.
        time.sleep(conf.SLEEP_SECONDS_FOR_INIT_COMMON_PROCESS)
        event.wait() 
Author: icon-project, Project: loopchain, Lines: 18, Source file: common_process.py

Example 10: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def __init__(self, data_queue, store_reference=None,
                 split=Constants.csv_delimiter,
                 consumer_timeout=Constants.parser_timeout_ms):
        """

        :param data_queue: Reference to Queue where processed data will be put.
        :type data_queue: multiprocessing Queue.
        :param store_reference: Reference to CSVProcess instance, if needed.
        :type store_reference: CSVProcess (multiprocessing.Process)
        :param split: Delimiter in incoming data.
        :type split: str.
        :param consumer_timeout: Time to wait after emptying the internal buffer before next parsing.
        :type consumer_timeout: float.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._in_queue = multiprocessing.Queue()
        self._out_queue = data_queue
        self._consumer_timeout = consumer_timeout
        self._split = split
        self._store_reference = store_reference
        Log.d(TAG, "Process ready") 
Author: ssepulveda, Project: RTGraph, Lines: 24, Source file: Parser.py

Example 11: __init__

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def __init__(self, filename=None, path=None, timeout=0.5):
        """
        Sets up the file to export the data as CSV.
        If filename is not specified, a default name based on time will be used.
        :param filename: Name of the file where data will be exported.
        :type filename: str.
        :param path: Path where data file will be saved.
        :type path: str.
        :param timeout: Time to wait after emptying the internal buffer before next write.
        :type timeout: float.
        """
        multiprocessing.Process.__init__(self)
        self._exit = multiprocessing.Event()
        self._store_queue = multiprocessing.Queue()
        self._csv = None
        self._file = None
        self._timeout = timeout

        if filename is None:
            filename = strftime(Constants.csv_default_filename, gmtime())
        self._file = self._create_file(filename, path=path)
        Log.i(TAG, "Process ready") 
Author: ssepulveda, Project: RTGraph, Lines: 24, Source file: Csv.py

Example 12: start

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def start(self, workers=1, max_queue_size=10):
        def data_generator_task():
            while not self._stop_event.is_set():
                try:
                    if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
                        generator_output = next(self._generator)
                        self.queue.put(generator_output)
                    else:
                        time.sleep(self.wait_time)
                except Exception:
                    self._stop_event.set()
                    raise

        try:
            if self._use_multiprocessing:
                self.queue = multiprocessing.Queue(maxsize=max_queue_size)
                self._stop_event = multiprocessing.Event()
            else:
                self.queue = queue.Queue()
                self._stop_event = threading.Event()

            for _ in range(workers):
                if self._use_multiprocessing:
                    # Reset random seed else all children processes
                    # share the same seed
                    np.random.seed(self.random_seed)
                    thread = multiprocessing.Process(target=data_generator_task)
                    thread.daemon = True
                    if self.random_seed is not None:
                        self.random_seed += 1
                else:
                    thread = threading.Thread(target=data_generator_task)
                self._threads.append(thread)
                thread.start()
        except:
            self.stop()
            raise 
Author: zzzDavid, Project: ICDAR-2019-SROIE, Lines: 39, Source file: data_util.py

Example 13: _process_worker

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
            to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except:
            e = sys.exc_info()[1]
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r)) 
Author: remg427, Project: misp42splunk, Lines: 30, Source file: process.py

Example 14: _process_worker

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be written
            to by the worker.
        shutdown: A multiprocessing.Event that will be set as a signal to the
            worker that it should exit when call_queue is empty.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException:
            e = sys.exc_info()[1]
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r)) 
Author: jmarth, Project: plugin.video.kmediatorrent, Lines: 30, Source file: process.py
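To make the sentinel protocol in the docstring concrete, the following is a rough, hedged sketch of how a parent process could drive _process_worker and shut it down by enqueuing None. The namedtuple stand-ins below are simplifications introduced for illustration; the real _CallItem and _ResultItem classes in concurrent.futures carry more bookkeeping.

import multiprocessing
from collections import namedtuple

# Simplified stand-ins for the real classes (assumption, for illustration only).
_CallItem = namedtuple('_CallItem', ['work_id', 'fn', 'args', 'kwargs'])
_ResultItem = namedtuple('_ResultItem', ['work_id', 'exception', 'result'],
                         defaults=[None, None])

if __name__ == '__main__':
    call_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_process_worker,
                                     args=(call_queue, result_queue))
    worker.start()
    call_queue.put(_CallItem(0, pow, (2, 10), {}))
    call_queue.put(None)                 # sentinel: worker exits after this
    print(result_queue.get().result)     # -> 1024
    worker.join()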

Example 15: main

# Required module: import multiprocessing [as alias]
# Or: from multiprocessing import Event [as alias]
def main():
    """ Prepare arguments, configurations, variables and run the event loop. """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("config", type=Path, help="Main config path, should include all hardeware")
    args = parser.parse_args()

    pid_path = '/tmp/derp_drive.pid'
    if derp.util.is_already_running(pid_path):
        return
    derp.util.write_pid(pid_path)

    config = derp.util.load_config(args.config)
    recording_path = derp.util.make_recording_path()
    derp.util.dump_config(config, recording_path / 'config.yaml')
    config['recording_path'] = recording_path
    logger = derp.util.init_logger('drive', config['recording_path'])

    component_map = {
        "brain": derp.brain.Clone,
        "camera": derp.camera.Camera,
        "imu": derp.imu.Imu,
        "joystick": derp.joystick.Joystick,
        "servo": derp.servo.Servo,
        "writer": derp.writer.Writer,
    }
    processes = []
    exit_event = Event()
    for name in sorted(component_map):
        if name not in config:
            logger.info("skip %s", name)
            continue
        proc_args = (config, exit_event, component_map[name])
        proc = Process(target=loop, name=name, args=proc_args)
        proc.start()
        processes.append(proc)
        logger.info("start %s %i", name, proc.pid)
    while all_running(processes):
        time.sleep(0.1)
    exit_event.set()
    logger.info("exit") 
Author: notkarol, Project: derplearning, Lines: 42, Source file: drive.py


Note: The multiprocessing.Event examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's License before redistributing or using the code. Do not republish without permission.