

Python multiprocessing.SimpleQueue Class Code Examples

This article collects typical usage examples of the multiprocessing.SimpleQueue class in Python. If you have been wondering what SimpleQueue is for, or how to use it, the curated class examples below should help.


The following shows 15 code examples of the SimpleQueue class, sorted by popularity by default.
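Before diving into the examples, here is a minimal sketch of the SimpleQueue API. SimpleQueue is an unbounded inter-process queue supporting only put(), get() and empty(); unlike multiprocessing.Queue it has no timeouts, no qsize() and no background feeder thread. The worker function below is illustrative, not taken from any of the examples:

from multiprocessing import Process, SimpleQueue

def worker(queue):
    # consume items until the None sentinel arrives
    while True:
        item = queue.get()  # blocks until an item is available
        if item is None:
            break
        print('got', item)

if __name__ == '__main__':
    queue = SimpleQueue()
    p = Process(target=worker, args=(queue,))
    p.start()
    for i in range(3):
        queue.put(i)
    queue.put(None)  # sentinel tells the worker to exit
    p.join()

Because SimpleQueue offers so little shutdown machinery (close() only appeared in Python 3.9), the examples below typically signal termination with an explicit sentinel such as None or StopIteration().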

Example 1: run

    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish IPC queues (SimpleQueue does not require a manager process)
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
Author: raysect, Project: source, Lines: 25, Source: workflow.py

Example 2: export_table

def export_table(host, port, auth_key, db, table, directory, fields, delimiter, format,
                 error_queue, progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter, task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                         table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Author: HiroIshikawa, Project: 21playground, Lines: 26, Source: _export.py

Example 3: data_from_file

async def data_from_file(main2gvf: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')
    for i, item in enumerate(data):
        # if i > 500:
        #     break
        item[-1] = coder(x1=item[-1], x2=item[-2])
        main2gvf.put(item)
Author: yasuiniko, Project: cmput607-W18, Lines: 8, Source: final_project.py

Example 4: merge_db

def merge_db(db_folder, new_db_name, db_to_merge):

    assert path.exists(db_folder), '`{}` is a wrong path to db folder, please correct it.'.format(db_folder)

    shutdown = Event()
    writer_queue = SimpleQueue()

    writer = Writer(db_folder=db_folder, db_name=new_db_name, queue=writer_queue, shutdown=shutdown)
    reader = Reader(db_folder=db_folder, db_to_merge=db_to_merge,
                    queue=writer_queue, shutdown=shutdown)

    reader.start()
    writer.start()

    pbar = tqdm(total=len(db_to_merge))

    c = 0
    while not shutdown.is_set():
        try:
            new_c = writer.counter.value
            progress = new_c - c
            if progress > 0:
                pbar.update(progress)
                c = new_c
            Event().wait(2)  # sleep ~2 s; a throwaway Event used as an interruptible timer

        except KeyboardInterrupt:
            print()
            print("Main thread grab the keyboard interrupt")
            break

    shutdown.set()
    pbar.close()
    # writer.join()
    # reader.join()

    print("writer alive", writer.is_alive())
    print("reader alive", reader.is_alive())

    if writer.is_alive():

        print("Waiting writer...")
        writer.join()

    print("WRITER EXECUTED")

    if reader.is_alive():
        print("Waiting reader...")
        writer_queue.get()
        print("Waiting reader 2...")
        reader.join()

    print("READER EXECUTED")

    print("Done.")
Author: getzneet, Project: SpatialEconomy, Lines: 55, Source: merge_db.py

Example 5: fork_process

def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target
        )
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support

        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2],
                "".join(traceback.format_tb(result.args[3])),
            )
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
Author: Hawks12, Project: pybuilder, Lines: 54, Source: utils.py
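
The function above tunnels the child's exception and traceback back to the parent through a SimpleQueue and re-raises them there. A hypothetical usage sketch, assuming a POSIX system with tblib installed (compute and the logger are made up for illustration, not part of pybuilder):

import logging

def compute(a, b):
    return a + b

logger = logging.getLogger("demo")
exit_code, value = fork_process(logger, target=compute, args=(2, 3))
assert exit_code == 0 and value == 5
# had compute() raised, fork_process would re-raise the same exception here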

Example 6: learning_loop

def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 53, Source: module4.py

Example 7: data_from_file

async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)

    time.sleep(0.1)
    while not gvf2plot.empty():
        time.sleep(0.1)
Author: yasuiniko, Project: cmput607-W18, Lines: 12, Source: module_two.py

Example 8: _fit

    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))  # key by block[0]; 'b' may be unbound here
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Author: MSusik, Project: beard, Lines: 51, Source: blocking.py

Example 9: plotting_loop

def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):
    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')

        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Author: yasuiniko, Project: cmput607-W18, Lines: 48, Source: final_project.py

Example 10: __init__

    def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
        self.db_file = db_file
        self.connection = sqlite3.connect(self.db_file)
        self.broker_cursor = self.connection.cursor()
        self.broker_queue = SimpleQueue()
        self.broker = None
        self.lock_wait_time = lock_wait_time
Author: biologyguy, Project: RD-MCL, Lines: 7, Source: helpers.py

Example 11: _open_frontend

    def _open_frontend(self):
        from multiprocessing import Process, SimpleQueue

        connection = SimpleQueue()
        frontend = Process(
            target=self._open_frontend_process,
            args=(connection, [k for k in sys.argv[1:] if k != "--frontend"]))
        frontend.start()
        cmdline = connection.get()
        frontend.join()
        if self.interactive:
            argv_backup = list(sys.argv)
        sys.argv[1:] = cmdline.split()
        Main.setup_argv(True, True)
        if self.interactive:
            sys.argv = argv_backup
        print("Running with the following command line: %s" % sys.argv)
Author: EgBulychev, Project: veles, Lines: 17, Source: __main__.py

Example 12: start

def start(parsed_args):
    from multiprocessing import Process, SimpleQueue

    processes = []
    msg_queue = SimpleQueue()
    word_count_queue = SimpleQueue()
    unique_words_queue = SimpleQueue()
    median_queue = SimpleQueue()

    # Prep workers to read from msg queue and write to other queues
    for i in range(workers):
        p = Process(target=worker,
                      args=(msg_queue, unique_words_queue, word_count_queue))
        processes.append(p)
        p.start()

    # Prep a process to accumulate word_count_queue for ft1.txt
    p = Process(target=accumulator,
                  args=(word_count_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Prep a process to re-sequence unique words counted
    p = Process(target=buffered_resequener,
                  args=(unique_words_queue, median_queue))
    processes.append(p)
    p.start()

    # Prep a process to keep a running median of unique words for ft2.txt
    p = Process(target=running_median,
                  args=(median_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Start reading msgs for the msg_queue
    ingest(parsed_args.file, msg_queue)

    # Sending an indication to stop, one for each worker
    for i in range(workers):
        msg_queue.put(None)

    # This step gathers the child processes, but may be unnecessary
    for p in processes:
        p.join()
Author: mhakanda, Project: insight-data-engineering-code-challenge, Lines: 44, Source: tweetStats.py

Example 13: plotting_loop

def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        data = gvf2plot.get()

        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Author: yasuiniko, Project: cmput607-W18, Lines: 19, Source: module_two.py

Example 14: __init__

    def __init__(self, server, nickname, user, host='localhost'):
        self.server = server
        self.nickname = nickname
        self.realname = nickname
        self.user = user
        self.host = host

        self._readbuffer = ""
        self._writebuffer = ""
        self.request_queue = SimpleQueue()
        self.response_queue = SimpleQueue()

        # dict of board => list of users
        self.board_watchers = defaultdict(list)

        # dict of board, thread => list of users
        self.thread_watchers = defaultdict(lambda: defaultdict(list))

        Process(
            target=Ami,
            name='immediate api worker',
            args=(self.request_queue, self.response_queue)
        ).start()
Author: ATRAN2, Project: Futami, Lines: 23, Source: client.py

Example 15: __init__

    def __init__(self, request_queue, response_queue):
        self.request_queue = request_queue
        self.response_queue = response_queue
        self.update_request_queue = SimpleQueue()

        Process(
            target=self.update_loop,
            name='periodic api worker',
            args=(response_queue, self.update_request_queue),
        ).start()

        logger.debug("initialization complete")

        self.request_loop()
Author: ATRAN2, Project: Futami, Lines: 14, Source: ami.py


Note: the multiprocessing.SimpleQueue class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.