
Python SimpleQueue.get Method: Code Examples

This article collects typical usage examples of the multiprocessing.SimpleQueue.get method in Python. If you are wondering how exactly SimpleQueue.get is used, or are looking for working examples of calling it, the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, multiprocessing.SimpleQueue.


The following presents 15 code examples of the SimpleQueue.get method, sorted by popularity by default.
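Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core behaviour: SimpleQueue.get() blocks until an item arrives, so producers commonly send a sentinel value to signal the end of the stream.

from multiprocessing import Process, SimpleQueue

def producer(queue):
    for i in range(3):
        queue.put(i)
    queue.put(None)  # sentinel: tells the consumer to stop

if __name__ == '__main__':
    queue = SimpleQueue()
    worker = Process(target=producer, args=(queue,))
    worker.start()
    while True:
        item = queue.get()  # blocks until an item is available
        if item is None:
            break
        print('got', item)
    worker.join()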

Example 1: merge_db

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def merge_db(db_folder, new_db_name, db_to_merge):

    assert path.exists(db_folder), '`{}` is not a valid db folder path, please correct it.'.format(db_folder)

    shutdown = Event()
    writer_queue = SimpleQueue()

    writer = Writer(db_folder=db_folder, db_name=new_db_name, queue=writer_queue, shutdown=shutdown)
    reader = Reader(db_folder=db_folder, db_to_merge=db_to_merge,
                    queue=writer_queue, shutdown=shutdown)

    reader.start()
    writer.start()

    pbar = tqdm(total=len(db_to_merge))

    c = 0
    while not shutdown.is_set():
        try:
            new_c = writer.counter.value
            progress = new_c - c
            if progress > 0:
                pbar.update(progress)
                c = new_c
            shutdown.wait(2)  # sleep up to 2 s, but wake early if shutdown is set

        except KeyboardInterrupt:
            print()
            print("Main thread grab the keyboard interrupt")
            break

    shutdown.set()
    pbar.close()
    # writer.join()
    # reader.join()

    print("writer alive", writer.is_alive())
    print("reader alive", reader.is_alive())

    if writer.is_alive():

        print("Waiting writer...")
        writer.join()

    print("WRITER EXECUTED")

    if reader.is_alive():
        print("Waiting reader...")
        writer_queue.get()  # drain one item so a reader blocked in put() can proceed
        print("Waiting reader 2...")
        reader.join()

    print("READER EXECUTED")

    print("Done.")
Developer: getzneet, Project: SpatialEconomy, Lines: 57, Source: merge_db.py
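Note the writer_queue.get() call before reader.join() above: a child blocked inside put() (because the underlying pipe buffer is full) would deadlock a plain join(). A minimal sketch of that drain-before-join pattern, with hypothetical names, assuming the child always sends a final sentinel:

from multiprocessing import Event, Process, SimpleQueue

def reader_proc(queue, shutdown):
    while not shutdown.is_set():
        queue.put(b'x' * 65536)  # may block once the pipe buffer fills
    queue.put(None)              # sentinel: always the last item sent

if __name__ == '__main__':
    queue = SimpleQueue()
    shutdown = Event()
    reader = Process(target=reader_proc, args=(queue, shutdown))
    reader.start()
    shutdown.set()
    while queue.get() is not None:  # drain, unblocking any pending put()
        pass
    reader.join()                   # now safe: the child can exit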

Example 2: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Developer: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module4.py
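SimpleQueue.get() accepts no timeout, which is why this loop (and its siblings below) spins on empty() with a short sleep instead of calling get() directly: a blocked get() would never observe exit_flag. A small helper, sketched here under the same assumptions, factors that pattern out:

import time
import multiprocessing as mp

def poll_get(queue: mp.SimpleQueue, exit_flag: mp.Value, interval=0.001):
    # Returns the next item, or None if exit_flag is set first.
    # Assumes a single consumer, like the loops in these examples.
    while exit_flag.value == 0:
        if not queue.empty():
            return queue.get()
        time.sleep(interval)
    return None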

Example 3: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Developer: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module3.py

Example 4: _fit

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
    def _fit(self, X, y, blocks):
        """Fit base clustering estimators on X."""
        self.blocks_ = blocks

        processes = []
        # Here the blocks will be passed to subprocesses
        data_queue = SimpleQueue()
        # Here the results will be passed back
        result_queue = SimpleQueue()
        for x in range(self.n_jobs):
            processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                             self.partial_fit_, self.base_estimator,
                             self.verbose, data_queue, result_queue)))
            processes[-1].start()

        # First n_jobs blocks are sent into the queue without waiting for the
        # results. This variable is a counter that takes care of this.
        presend = 0
        blocks_computed = 0
        blocks_all = len(np.unique(blocks))

        for block in self._blocks(X, y, blocks):
            if presend >= self.n_jobs:
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer
            else:
                presend += 1
            if self.partial_fit_:
                if block[0] in self.clusterers_:
                    data_queue.put(('middle', block, self.clusterers_[block[0]]))
                    continue

            data_queue.put(('middle', block, None))

        # Get the last results and tell the subprocesses to finish
        for x in range(self.n_jobs):
            if blocks_computed < blocks_all:
                print("%s blocks computed out of %s" % (blocks_computed,
                                                        blocks_all))
                b, clusterer = result_queue.get()
                blocks_computed += 1
                if clusterer:
                    self.clusterers_[b] = clusterer

        data_queue.put(('end', None, None))

        time.sleep(1)

        return self
Developer: MSusik, Project: beard, Lines: 53, Source: blocking.py
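The worker function _parallel_fit is not part of this excerpt. Below is a hypothetical sketch of the message protocol it would have to implement, including one way a single ('end', None, None) message can stop all n_jobs workers (each worker re-queues the sentinel before exiting); the block layout (key, samples) and the use of sklearn's clone are assumptions, not the project's actual code.

from sklearn.base import clone

def _parallel_fit(fit_, partial_fit_, base_estimator, verbose,
                  data_queue, result_queue):
    # Hypothetical worker; verbose/partial-fit handling omitted.
    while True:
        msg, block, clusterer = data_queue.get()  # blocks until work arrives
        if msg == 'end':
            data_queue.put(('end', None, None))   # forward sentinel to peer workers
            break
        b, X = block                              # assumed layout: (block key, samples)
        if clusterer is None:
            clusterer = clone(base_estimator)
        clusterer.fit(X)
        result_queue.put((b, clusterer))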

Example 5: run

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
    def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

        # establish ipc queues using a manager process
        task_queue = SimpleQueue()
        result_queue = SimpleQueue()

        # start process to generate image samples
        producer = Process(target=self._producer, args=(tasks, task_queue))
        producer.start()

        # start worker processes
        workers = []
        for pid in range(self._processes):
            p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
            p.start()
            workers.append(p)

        # consume results
        for _ in tasks:
            result = result_queue.get()
            update(result, *update_args, **update_kwargs)

        # shutdown workers
        for _ in workers:
            task_queue.put(None)
Developer: raysect, Project: source, Lines: 27, Source: workflow.py
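A hypothetical invocation of run(); engine, render and update are illustrative names, not the raysect API. update runs in the parent process, so it can safely mutate local state; results arrive in completion order, not task order.

def render(task, scale=1):
    return task * scale            # stand-in for an expensive render step

def update(result, totals):
    totals.append(result)          # collect results in the parent process

results = []
engine.run(tasks=range(100), render=render, update=update,
           render_kwargs={'scale': 2}, update_args=(results,))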

Example 6: fork_process

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target
        )
        return fake_windows_fork(group, target, name, args, kwargs)
    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support

        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()
    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2],
                "".join(traceback.format_tb(result.args[3])),
            )
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
Developer: Hawks12, Project: pybuilder, Lines: 56, Source: utils.py
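A hedged usage sketch for fork_process (logger is assumed to provide warn()): the child's return value comes back through the SimpleQueue, and a child exception is re-raised in the parent with its original traceback.

def fine():
    return 42

def risky():
    raise ValueError("boom")

exit_code, value = fork_process(logger, target=fine)   # value == 42
try:
    fork_process(logger, target=risky)
except ValueError as error:
    print("parent saw:", error)    # re-raised from the child process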

Example 7: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Developer: yasuiniko, Project: cmput607-W18, Lines: 38, Source: module_two.py

Example 8: plotting_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):
    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')

        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Developer: yasuiniko, Project: cmput607-W18, Lines: 50, Source: final_project.py

Example 9: _open_frontend

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
    def _open_frontend(self):
        from multiprocessing import Process, SimpleQueue

        connection = SimpleQueue()
        frontend = Process(
            target=self._open_frontend_process,
            args=(connection, [k for k in sys.argv[1:] if k != "--frontend"]))
        frontend.start()
        cmdline = connection.get()
        frontend.join()
        if self.interactive:
            argv_backup = list(sys.argv)
        sys.argv[1:] = cmdline.split()
        Main.setup_argv(True, True)
        if self.interactive:
            sys.argv = argv_backup
        print("Running with the following command line: %s" % sys.argv)
Developer: EgBulychev, Project: veles, Lines: 19, Source: __main__.py

Example 10: plotting_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        data = gvf2plot.get()

        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Developer: yasuiniko, Project: cmput607-W18, Lines: 21, Source: module_two.py

Example 11: str

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
            while True:
                try:
                    if not self.send_queue.empty(): # If there is something to send
                        message = self.send_queue.get()
                        message = str(len(message.encode())).zfill(4) + message # Prefix four-digit byte count
                        connection.send(message.encode())
                        print("Sent message")
                    size_prefix = connection.recv(4).decode() # Receive the four-byte size prefix
                    if size_prefix == '': # If disconnected
                        print("Connection finished")
                        break
                    if size_prefix: # If a message was received
                        self.recv_queue.put(connection.recv(int(size_prefix)).decode())
                        print("Received message")
                except Exception as error:
                    if error.errno != EWOULDBLOCK: # Ignore errors caused by the non-blocking socket
                        print(error)


# Setup process stuff

recv_queue = SimpleQueue()
send_queue = SimpleQueue()

server = Server(send_queue, recv_queue)
server.start()

while True:
    send_queue.put("mayonaise")
    print(recv_queue.get())
Developer: PoolFeast6969, Project: amav, Lines: 32, Source: Server.py
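The loop above frames every message with a four-digit byte-length prefix. A minimal sketch of that wire format with hypothetical helper names; like the original, it assumes recv() returns the requested bytes in a single call.

def frame(message: str) -> bytes:
    data = message.encode()
    assert len(data) <= 9999, "the prefix is fixed at four digits"
    return str(len(data)).zfill(4).encode() + data

def read_frame(connection) -> str:
    size_prefix = connection.recv(4).decode()
    if size_prefix == '':                       # peer disconnected
        raise ConnectionError("connection finished")
    return connection.recv(int(size_prefix)).decode()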

Example 12: spawn_import_clients

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Developer: llcheng01, Project: falcon_via_python, Lines: 88, Source: _import.py

Example 13: measure_cmd_resources

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
    def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
        """Measure system resource usage of a command"""
        def _worker(data_q, cmd, **kwargs):
            """Worker process for measuring resources"""
            try:
                start_time = datetime.now()
                ret = runCmd2(cmd, **kwargs)
                etime = datetime.now() - start_time
                rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
                iostat = OrderedDict()
                with open('/proc/{}/io'.format(os.getpid())) as fobj:
                    for line in fobj.readlines():
                        key, val = line.split(':')
                        iostat[key] = int(val)
                rusage = OrderedDict()
                # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
                # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
                for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
                            'ru_majflt', 'ru_inblock', 'ru_oublock',
                            'ru_nvcsw', 'ru_nivcsw']:
                    rusage[key] = getattr(rusage_struct, key)
                data_q.put({'ret': ret,
                            'start_time': start_time,
                            'elapsed_time': etime,
                            'rusage': rusage,
                            'iostat': iostat})
            except Exception as err:
                data_q.put(err)

        cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
        log.info("Timing command: %s", cmd_str)
        data_q = SimpleQueue()
        try:
            proc = Process(target=_worker, args=(data_q, cmd,))
            proc.start()
            data = data_q.get()
            proc.join()
            if isinstance(data, Exception):
                raise data
        except CommandError:
            log.error("Command '%s' failed", cmd_str)
            raise
        etime = data['elapsed_time']

        measurement = OrderedDict([('type', self.SYSRES),
                                   ('name', name),
                                   ('legend', legend)])
        measurement['values'] = OrderedDict([('start_time', data['start_time']),
                                             ('elapsed_time', etime),
                                             ('rusage', data['rusage']),
                                             ('iostat', data['iostat'])])
        if save_bs:
            self.save_buildstats(name)

        self._append_measurement(measurement)

        # Append to 'times' array for globalres log
        e_sec = etime.total_seconds()
        self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
                                                        int((e_sec % 3600) / 60),
                                                        e_sec % 60))
Developer: 01org, Project: luv-yocto, Lines: 63, Source: base.py

Example 14: scan_regionset

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def scan_regionset(regionset, options):
    """ This function scans all te region files in a regionset object
    and fills the ScannedRegionFile obj with the results
    """

    total_regions = len(regionset.regions)
    total_chunks = 0
    corrupted_total = 0
    wrong_total = 0
    entities_total = 0
    too_small_total = 0
    unreadable = 0

    # init progress bar
    if not options.verbose:
        pbar = progressbar.ProgressBar(
            widgets=['Scanning: ', FractionWidget(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(left='[',right=']'), ' ', progressbar.ETA()],
            maxval=total_regions)

    # queue used by processes to pass finished stuff
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes=options.processes,
            initializer=_mp_pool_init,initargs=(regionset,options,q))

    if not options.verbose:
        pbar.start()

    # start the pool
    # Note to self: every child process has its own memory space,
    # which means every object received by it will be a copy of the
    # main object
    result = pool.map_async(multithread_scan_regionfile, regionset.list_regions(None), max(1,total_regions//options.processes))

    # printing status
    region_counter = 0

    while not result.ready() or not q.empty():
        time.sleep(0.01)
        if not q.empty():
            r = q.get()
            if r is None: # something went wrong scanning this region file
                          # probably a bug... don't know if it's a good
                          # idea to skip it
                continue
            if not isinstance(r,world.ScannedRegionFile):
                raise ChildProcessException(r)
            else:
                corrupted, wrong, entities_prob, shared_offset, num_chunks = r.get_counters()
                filename = r.filename
                # the obj returned is a copy, overwrite it in regionset
                regionset[r.get_coords()] = r
                corrupted_total += corrupted
                wrong_total += wrong
                total_chunks += num_chunks
                entities_total += entities_prob
                if r.status == world.REGION_TOO_SMALL:
                    too_small_total += 1
                elif r.status == world.REGION_UNREADABLE:
                    unreadable += 1
                region_counter += 1
                if options.verbose:
                    if r.status == world.REGION_OK:
                        stats = "(c: {0}, w: {1}, tme: {2}, so: {3}, t: {4})".format(corrupted, wrong, entities_prob, shared_offset, num_chunks)
                    elif r.status == world.REGION_TOO_SMALL:
                        stats = "(Error: not a region file)"
                    elif r.status == world.REGION_UNREADABLE:
                        stats = "(Error: unreadable region file)"
                    print("Scanned {0: <12} {1:.<43} {2}/{3}".format(filename, stats, region_counter, total_regions))
                else:
                    pbar.update(region_counter)

    if not options.verbose: pbar.finish()

    regionset.scanned = True
Developer: delve, Project: Minecraft-Entity-Finder, Lines: 76, Source: scan.py
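scan_regionset relies on _mp_pool_init (not shown in this excerpt) to hand each pool worker the shared SimpleQueue via initargs, since queues cannot be passed to pool workers as task arguments. A hypothetical sketch of that initializer pattern; scan_region_file stands in for the real per-file scan.

_queue = None  # set in each worker by the initializer

def _mp_pool_init(regionset, options, q):
    # Runs once inside every pool worker; the queue is stored as a
    # worker-global so multithread_scan_regionfile can reach it.
    global _queue
    _queue = q

def multithread_scan_regionfile(region_file):
    result = scan_region_file(region_file)  # hypothetical per-file scan
    _queue.put(result)                      # report back to the parent
    return result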

Example 15: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import get [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [g.data(xi, obs, action, xpi, obsp)
                 for gs, xi, xpi in zip(gvfs, x, xp)
                 for g in gs]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Developer: yasuiniko, Project: cmput607-W18, Lines: 94, Source: final_project.py


Note: the multiprocessing.SimpleQueue.get examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.