

Python SimpleQueue.put Method Code Examples

This article collects typical code examples of the multiprocessing.SimpleQueue.put method in Python. If you are wondering what exactly SimpleQueue.put does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, multiprocessing.SimpleQueue.


The sections below show 15 code examples of the SimpleQueue.put method, sorted by popularity by default.
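
Before working through the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of SimpleQueue.put handing a value from a child process to its parent:

from multiprocessing import Process, SimpleQueue

def producer(queue):
    # put() pickles the object and writes it to the queue's underlying pipe
    queue.put("hello from the child process")

if __name__ == '__main__':
    q = SimpleQueue()
    p = Process(target=producer, args=(q,))
    p.start()
    print(q.get())  # blocks until the child has put an item
    p.join()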

Example 1: export_table

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def export_table(host, port, auth_key, db, table, directory, fields, delimiter, format,
                 error_queue, progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter, task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                         table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(str(ex)), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Author: HiroIshikawa, Project: 21playground, Lines: 28, Source: _export.py
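
Example 1 shuts its writer process down by putting a StopIteration instance on the task queue as an end-of-stream sentinel. A minimal sketch of that pattern, with a hypothetical writer function standing in for launch_writer:

from multiprocessing import Process, SimpleQueue

def writer(task_queue):
    while True:
        task = task_queue.get()
        if isinstance(task, StopIteration):  # sentinel: no more rows to write
            break
        print("writing:", task)

if __name__ == '__main__':
    task_queue = SimpleQueue()
    w = Process(target=writer, args=(task_queue,))
    w.start()
    task_queue.put({"id": 1})
    task_queue.put(StopIteration())  # ask the writer to finish
    w.join()                         # then wait for it, as the finally block above does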

Example 2: run

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish IPC queues (SimpleQueue is a lightweight pipe-based queue)
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
Author: raysect, Project: source, Lines: 27, Source: workflow.py
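
Example 2 ends its pool by putting one None per worker: each worker exits after consuming exactly one sentinel, and results are collected by count rather than with a sentinel on the result queue. A sketch of a worker loop matching that shutdown scheme (names are illustrative; squaring stands in for render):

from multiprocessing import Process, SimpleQueue

def worker(task_queue, result_queue):
    while True:
        task = task_queue.get()
        if task is None:  # one None terminates exactly one worker
            break
        result_queue.put(task * task)

if __name__ == '__main__':
    tasks, results = SimpleQueue(), SimpleQueue()
    workers = [Process(target=worker, args=(tasks, results)) for _ in range(4)]
    for w in workers:
        w.start()
    for t in range(10):
        tasks.put(t)
    print(sorted(results.get() for _ in range(10)))  # consume results by count
    for _ in workers:
        tasks.put(None)  # shutdown sentinels, one per worker
    for w in workers:
        w.join()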

Example 3: data_from_file

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
async def data_from_file(main2gvf: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')
    for i, item in enumerate(data):
        # if i > 500:
        #     break
        item[-1] = coder(x1=item[-1], x2=item[-2])
        main2gvf.put(item)
Author: yasuiniko, Project: cmput607-W18, Lines: 10, Source: final_project.py

Example 4: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module3.py

Example 5: data_from_file

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)

    time.sleep(0.1)
    while not gvf2plot.empty():
        time.sleep(0.1)
Author: yasuiniko, Project: cmput607-W18, Lines: 14, Source: module_two.py

Example 6: start

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def start(parsed_args):
    from multiprocessing import Process, SimpleQueue

    processes = []
    msg_queue = SimpleQueue()
    word_count_queue = SimpleQueue()
    unique_words_queue = SimpleQueue()
    median_queue = SimpleQueue()

    # Prep workers to read from msg queue and write to other queues
    for i in range(workers):
        p = Process(target=worker,
                      args=(msg_queue, unique_words_queue, word_count_queue))
        processes.append(p)
        p.start()

    # Prep a process to accumulate word_count_queue for ft1.txt
    p = Process(target=accumulator,
                  args=(word_count_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Prep a process to re-sequence unique words counted
    p = Process(target=buffered_resequener,
                  args=(unique_words_queue, median_queue))
    processes.append(p)
    p.start()

    # Prep a process to keep a running median of unique words for ft2.txt
    p = Process(target=running_median,
                  args=(median_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Start reading msgs for the msg_queue
    ingest(parsed_args.file, msg_queue)

    # Sending an indication to stop, one for each worker
    for i in range(workers):
        msg_queue.put(None)

    # This step gathers the child processes, but may be unnecessary
    for p in processes:
        p.join()
Author: mhakanda, Project: insight-data-engineering-code-challenge, Lines: 46, Source: tweetStats.py

Example 7: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Author: yasuiniko, Project: cmput607-W18, Lines: 38, Source: module_two.py

Example 8: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module4.py

Example 9: _fit

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Author: MSusik, Project: beard, Lines: 53, Source: blocking.py

Example 10: servo_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
async def servo_loop(device: str,
                     sids: Sequence[int],
                     main2gvf: mp.SimpleQueue,
                     behaviour_policy: DiscretePolicy,
                     coder: KanervaCoder,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        # await send_msg(sr, sw, sid, [])
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    # read_data = [0x02,  # read
    #              0x24,  # starting from 0x24
    #              0x08]  # a string of 8 bytes

    read_all = [0x02,  # read
                0x00,  # starting from the beginning
                0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_all) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], list(action))

            # make feature vector
            active_pts = coder(obs=obs, byte_data=byte_data)

            # get most recent weights from control GVFs
            pass

            # decide on an action
            action, action_prob = behaviour_policy(obs=obs, x=active_pts)

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # send action and features to GVFs
            gvf_data = (action, action_prob, obs, active_pts)
            if locks:
                print('main gm a 1 a')
                gmlock.acquire()
                print('main gm a 1 b')
            main2gvf.put(gvf_data)
            if locks:
                print('main gm r 1 a')
                gmlock.release()
                print('main gm r 1 b')

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Author: yasuiniko, Project: cmput607-W18, Lines: 80, Source: final_project.py

Example 11: export_table

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
def export_table(db, table, directory, options, error_queue, progress_info, sindex_counter, hook_counter, exit_event):
    signal.signal(signal.SIGINT, signal.SIG_DFL) # prevent signal handlers from being set in child processes
    
    writer = None

    try:
        # -- get table info
        
        table_info = options.retryQuery('table info: %s.%s' % (db, table), query.db(db).table(table).info())
        
        # Rather than just the index names, store all index information
        table_info['indexes'] = options.retryQuery(
            'table index data %s.%s' % (db, table),
            query.db(db).table(table).index_status(),
            runOptions={'binary_format':'raw'}
        )
        
        sindex_counter.value += len(table_info["indexes"])

        table_info['write_hook'] = options.retryQuery(
            'table write hook data %s.%s' % (db, table),
            query.db(db).table(table).get_write_hook(),
            runOptions={'binary_format':'raw'})

        if table_info['write_hook'] is not None:
            hook_counter.value += 1

        with open(os.path.join(directory, db, table + '.info'), 'w') as info_file:
            info_file.write(json.dumps(table_info) + "\n")
        with sindex_counter.get_lock():
            sindex_counter.value += len(table_info["indexes"])
        # -- start the writer
        task_queue = SimpleQueue()
        writer = None
        if options.format == "json":
            filename = directory + "/%s/%s.json" % (db, table)
            writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
        elif options.format == "csv":
            filename = directory + "/%s/%s.csv" % (db, table)
            writer = multiprocessing.Process(target=csv_writer, args=(filename, options.fields, options.delimiter, task_queue, error_queue))
        elif options.format == "ndjson":
            filename = directory + "/%s/%s.ndjson" % (db, table)
            writer = multiprocessing.Process(target=json_writer, args=(filename, options.fields, task_queue, error_queue, options.format))
        else:
            raise RuntimeError("unknown format type: %s" % options.format)
        writer.start()
        
        # -- read in the data source
        
        # - 
        
        lastPrimaryKey = None
        read_rows      = 0
        runOptions     = {
            "time_format":"raw",
            "binary_format":"raw"
        }
        if options.outdated:
            runOptions["read_mode"] = "outdated"
        cursor = options.retryQuery(
            'initial cursor for %s.%s' % (db, table),
            query.db(db).table(table).order_by(index=table_info["primary_key"]),
            runOptions=runOptions
        )
        while not exit_event.is_set():
            try:
                for row in cursor:
                    # bail on exit
                    if exit_event.is_set():
                        break
                    
                    # add to the output queue
                    task_queue.put([row])
                    lastPrimaryKey = row[table_info["primary_key"]]
                    read_rows += 1
                    
                    # Update the progress every 20 rows
                    if read_rows % 20 == 0:
                        progress_info[0].value = read_rows
                
                else:
                    # Export is done - since we used estimates earlier, update the actual table size
                    progress_info[0].value = read_rows
                    progress_info[1].value = read_rows
                    break
            
            except (errors.ReqlTimeoutError, errors.ReqlDriverError) as e:
                # connection problem, re-setup the cursor
                try:
                    cursor.close()
                except Exception: pass
                cursor = options.retryQuery(
                    'backup cursor for %s.%s' % (db, table),
                    query.db(db).table(table).between(lastPrimaryKey, None, left_bound="open").order_by(index=table_info["primary_key"]),
                    runOptions=runOptions
                )
    
    except (errors.ReqlError, errors.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(str(ex)), traceback.extract_tb(sys.exc_info()[2])))
    except:
#......... part of the code omitted .........
Author: AtnNn, Project: rethinkdb, Lines: 103, Source: _export.py

Example 12: servo_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
async def servo_loop(device: str,
                     sids: Sequence[int],
                     coder: KanervaCoder,
                     main2gvf: mp.SimpleQueue,
                     gvf2main: mp.SimpleQueue,
                     **kwargs):
    # objects to read and write from servos
    sr, sw = await serial_asyncio.open_serial_connection(url=device,
                                                         **kwargs)

    # set servo speeds to slowest possible
    for sid in sids:
        await send_msg(sr, sw, sid, [0x03, 0x20, 0x00, 0x01])

    # set initial action
    action = initial_action

    # some constants
    read_data = [0x02,  # read
                 0x24,  # starting from 0x24
                 0x08]  # a string of 8 bytes

    # read_all = [0x02,  # read
    #             0x00,  # starting from the beginning
    #             0x32]  # all the bytes

    store_data = []

    try:
        for _ in range(20000):
            # read data from servos
            byte_data = [await send_msg(sr, sw, sid, read_data) for sid in sids]

            # convert to human-readable data
            obs = sum([parse_data(bd) for bd in byte_data], []) + list(action)

            # get active tiles in kanerva coding
            active_pts = coder(obs)

            # send action and features to GVFs
            gvf_data = (obs, active_pts)
            main2gvf.put(gvf_data)

            # get action control GVFs
            action = gvf2main.get()

            # send action to servos
            instructions = [goal_instruction(a)
                            for a in action
                            if a is not None]
            for sid, instr in zip(sids, instructions):
                await send_msg(sr, sw, sid, instr)

            # record data for later
            store_data.append(gvf_data)

        np.save('offline_data.npy', store_data)

    except KeyboardInterrupt:
        pass
    finally:
        sr.read()
        await sw.drain()

        for sid in sids:
            write(sw, sid, [0x03, 0x18, 0x00])  # disable torque
Author: yasuiniko, Project: cmput607-W18, Lines: 68, Source: module4.py

Example 13: InternalClient

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]

#......... part of the code omitted .........
        pass
        # prefix, message = message.split(" ", 1)

        # prefix = self._parse_prefix(prefix)

        # self.sending_client = self.server.get_client(prefix['nickname'])

        # self._readbuffer = message + '\r\n'
        # self._parse_read_buffer()

    def client_joined(self, client, channel):
        logger.debug("InternalClient handling {} joined {}".format(client, channel))

        channel_registration_map = {
            r'#/(.+)/$': self._client_register_board,
            r'#/(.+)/(\d+)$': self._client_register_thread,
        }

        matched_registration = False

        for regex, register_method in channel_registration_map.items():
            m = re.match(regex, channel.name)
            if m:
                register_method(client, channel, *m.groups())
                matched_registration = True
                break

        if not matched_registration:
            self._send_message(
                client, channel.name,
                "This channel ({}) doesn't look like a board. Nothing will happen in this channel.".format(channel.name)
            )
            return

    def _handle_command(self, command, arguments):
        # sending_client = self.sending_client
        # self.sending_client = None

        # Add handling here for actual input from users other than joins
        pass

    def _client_register_board(self, client, channel, board):
        logger.debug("registering to board: {}, {}, {}".format(client, channel, board))

        slash_board = '/{}/'.format(board)
        self._send_message(
            client, channel.name,
            "Welcome to {}, loading threads...".format(slash_board),
            sending_nick=slash_board,
        )

        target = BoardTarget(board)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        self.board_watchers[board].append(client)

    def _client_register_thread(self, client, channel, board, thread):
        logging.debug("registering to thread: {}, {}, {}, {}".format(client, channel, board, thread))

        slash_board_thread = '/{}/{}'.format(board, thread)

        self._send_message(
            client, channel.name,
            "Welcome to >>>{}, loading posts...".format(slash_board_thread),
            sending_nick=slash_board_thread,
        )

        target = ThreadTarget(board, thread)

        self.request_queue.put(
            SubscriptionUpdate.make(
                action=Action.LoadAndFollow,
                target=target,
                payload=(client.nickname, channel.name, target),
        ))

        # Thread reply_tos are ints when they come back from the API
        self.thread_watchers[board][int(thread)].append(client)

    def _send_message(self, client, channel, message, sending_nick=None):
        if sending_nick:
            real_nick = self.nickname
            self.nickname = sending_nick

        client.message(
            ":{} PRIVMSG {} :{}".format(
                self.prefix,
                channel,
                message,
            )
        )

        if sending_nick:
            self.nickname = real_nick
Author: ATRAN2, Project: Futami, Lines: 104, Source: client.py

Example 14: ProcessPoolExecutor

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
#......... part of the code omitted .........
Author: 5outh, Project: Databases-Fall2014, Lines: 103, Source: process.py
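
Example 14 shows the internals of concurrent.futures.ProcessPoolExecutor, where result_queue.put(None) only serves to wake the queue-management thread. For context, typical use of the public API looks roughly like this:

from concurrent.futures import ProcessPoolExecutor

def square(x):
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as pool:
        future = pool.submit(square, 7)  # submit() wakes the management thread via the result queue
        print(future.result())           # prints 49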

Example 15: str

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import put [as alias]
            while True:
                try:
                    if not self.send_queue.empty(): # If there is something to send
                        message = self.send_queue.get()
                        message = str(len(message.encode())).zfill(4) + message # Prefix the four-digit byte length
                        connection.send(message.encode())
                        print("Sent message")
                    size_prefix = connection.recv(4).decode() # Receive four bytes
                    if size_prefix == '': # If disconnected
                        print("Connection finished")
                        break
                    if size_prefix: # If a message was received
                        self.recv_queue.put(connection.recv(int(size_prefix)).decode())
                        print("Received message")
                except Exception as error:
                    if error.errno != EWOULDBLOCK: # Ignore errors caused by the non-blocking socket
                        print(error)


# Setup process stuff

recv_queue = SimpleQueue()
send_queue = SimpleQueue()

server = Server(send_queue, recv_queue)
server.start()

while True:
    send_queue.put("mayonaise")
    print(recv_queue.get())
Author: PoolFeast6969, Project: amav, Lines: 32, Source: Server.py


Note: The multiprocessing.SimpleQueue.put examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please refer to each project's license before using or redistributing the code; do not reproduce without permission.