

Python SimpleQueue.empty Method Code Examples

This article collects typical usage examples of the Python multiprocessing.SimpleQueue.empty method. If you have been wondering what SimpleQueue.empty does, how to call it, or what real uses of it look like, the curated examples below should help. You can also browse further usage examples of the containing class, multiprocessing.SimpleQueue.


Fourteen code examples of the SimpleQueue.empty method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
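Before the examples, here is a minimal, self-contained sketch (ours, not taken from any of the projects below) of the pattern most of them share: polling SimpleQueue.empty() in a short sleep loop so the consumer stays responsive instead of blocking on get().

from multiprocessing import Process, SimpleQueue
import time

def worker(queue):
    for i in range(3):
        queue.put(i)
    queue.put(None)  # sentinel marking the end of the stream

def main():
    queue = SimpleQueue()
    proc = Process(target=worker, args=(queue,))
    proc.start()

    while True:
        # Poll with empty() instead of blocking on get(), so this loop
        # could also check an exit flag between polls.
        while queue.empty():
            time.sleep(0.001)
        item = queue.get()
        if item is None:
            break
        print('got', item)
    proc.join()

if __name__ == '__main__':
    main()

Note that empty() is only a snapshot: with several consumers, the queue may be drained between an empty() check and the following get(), so the pattern is only reliable with a single consumer per queue.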

Example 1: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        i += 1
        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module3.py
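The wait loop at the top of learning_loop recurs in Examples 2, 4, and 9. It can be distilled into a small helper; this is a hypothetical refactoring of the snippet above, not code from the project:

import time

def wait_for_data(exit_flag, queue, poll_interval=0.001):
    # Sleep until `queue` has data or `exit_flag` is raised.
    # Returns True if data is available, False if told to exit.
    while exit_flag.value == 0 and queue.empty():
        time.sleep(poll_interval)
    return exit_flag.value == 0

The main loop then reduces to `if not wait_for_data(exit_flag, main2gvf): break` followed by `main2gvf.get()`.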

Example 2: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 55, Source: module4.py

Example 3: data_from_file

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)

    time.sleep(0.1)
    while not gvf2plot.empty():
        time.sleep(0.1)
Author: yasuiniko, Project: cmput607-W18, Lines: 14, Source: module_two.py
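A caveat on the drain-wait at the end of data_from_file (our note, not the project's): gvf2plot.empty() turning True only means the plotting process has taken the items off the queue, not that it has finished rendering them. A minimal sketch of the same wait as a reusable helper:

import time

def wait_until_drained(queue, poll_interval=0.1):
    # Spin until the consumer has emptied `queue`. Items may still
    # be in flight inside the consumer when this returns.
    while not queue.empty():
        time.sleep(poll_interval)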

Example 4: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Author: yasuiniko, Project: cmput607-W18, Lines: 38, Source: module_two.py

Example 5: plotting_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):
    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')

        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Author: yasuiniko, Project: cmput607-W18, Lines: 50, Source: final_project.py

Example 6: plotting_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        data = gvf2plot.get()

        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Author: yasuiniko, Project: cmput607-W18, Lines: 21, Source: module_two.py

Example 7: spawn_import_clients

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def spawn_import_clients(options, files_info):
    # Spawn one reader process for each db.table, as well as many client processes
    task_queue = SimpleQueue()
    error_queue = SimpleQueue()
    exit_event = multiprocessing.Event()
    interrupt_event = multiprocessing.Event()
    errors = []
    reader_procs = []
    client_procs = []

    parent_pid = os.getpid()
    signal.signal(signal.SIGINT, lambda a, b: abort_import(a, b, parent_pid, exit_event, task_queue, client_procs, interrupt_event))

    try:
        progress_info = []
        rows_written = multiprocessing.Value(ctypes.c_longlong, 0)

        for i in xrange(options["clients"]):
            client_procs.append(multiprocessing.Process(target=client_process,
                                                        args=(options["host"],
                                                              options["port"],
                                                              options["auth_key"],
                                                              task_queue,
                                                              error_queue,
                                                              rows_written,
                                                              options["force"],
                                                              options["durability"])))
            client_procs[-1].start()

        for file_info in files_info:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, -1), # Current lines/bytes processed
                                  multiprocessing.Value(ctypes.c_longlong, 0))) # Total lines/bytes to process
            reader_procs.append(multiprocessing.Process(target=table_reader,
                                                        args=(options,
                                                              file_info,
                                                              task_queue,
                                                              error_queue,
                                                              progress_info[-1],
                                                              exit_event)))
            reader_procs[-1].start()

        # Wait for all reader processes to finish - hooray, polling
        while len(reader_procs) > 0:
            time.sleep(0.1)
            # If an error has occurred, exit out early
            while not error_queue.empty():
                exit_event.set()
                errors.append(error_queue.get())
            reader_procs = [proc for proc in reader_procs if proc.is_alive()]
            update_progress(progress_info)

        # Wait for all clients to finish
        alive_clients = sum([client.is_alive() for client in client_procs])
        for i in xrange(alive_clients):
            task_queue.put(StopIteration())

        while len(client_procs) > 0:
            time.sleep(0.1)
            client_procs = [client for client in client_procs if client.is_alive()]

        # If we were successful, make sure 100% progress is reported
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        def plural(num, text):
            return "%d %s%s" % (num, text, "" if num == 1 else "s")

        # Continue past the progress output line
        print("")
        print("%s imported in %s" % (plural(rows_written.value, "row"),
                                     plural(len(files_info), "table")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
            if len(error) == 4:
                print("In file: %s" % error[3], file=sys.stderr)
        raise RuntimeError("Errors occurred during import")
Author: llcheng01, Project: falcon_via_python, Lines: 88, Source: _import.py
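The error-handling idiom in the polling loop above (also used in Examples 10 and 13) is worth isolating: workers push stringified errors onto a SimpleQueue, and the supervisor drains it with empty() on every pass so it can react without blocking its progress loop. A hypothetical sketch of just that piece:

def drain_errors(error_queue, exit_event, errors):
    # Collect any queued worker errors; on the first one, signal all
    # other workers to stop via the shared event.
    while not error_queue.empty():
        exit_event.set()
        errors.append(error_queue.get())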

Example 8: scan_regionset

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def scan_regionset(regionset, options):
    """ This function scans all te region files in a regionset object
    and fills the ScannedRegionFile obj with the results
    """

    total_regions = len(regionset.regions)
    total_chunks = 0
    corrupted_total = 0
    wrong_total = 0
    entities_total = 0
    too_small_total = 0
    unreadable = 0

    # init progress bar
    if not options.verbose:
        pbar = progressbar.ProgressBar(
            widgets=['Scanning: ', FractionWidget(), ' ', progressbar.Percentage(), ' ', progressbar.Bar(left='[',right=']'), ' ', progressbar.ETA()],
            maxval=total_regions)

    # queue used by processes to pass finished stuff
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes=options.processes,
            initializer=_mp_pool_init,initargs=(regionset,options,q))

    if not options.verbose:
        pbar.start()

    # start the pool
    # Note to self: every child process has its own memory space,
    # so every object received by a child will be a copy of the
    # main object
    result = pool.map_async(multithread_scan_regionfile, regionset.list_regions(None), max(1,total_regions//options.processes))

    # printing status
    region_counter = 0

    while not result.ready() or not q.empty():
        time.sleep(0.01)
        if not q.empty():
            r = q.get()
            if r is None: # something went wrong scanning this region file
                          # probably a bug... don't know if it's a good
                          # idea to skip it
                continue
            if not isinstance(r,world.ScannedRegionFile):
                raise ChildProcessException(r)
            else:
                corrupted, wrong, entities_prob, shared_offset, num_chunks = r.get_counters()
                filename = r.filename
                # the obj returned is a copy, overwrite it in regionset
                regionset[r.get_coords()] = r
                corrupted_total += corrupted
                wrong_total += wrong
                total_chunks += num_chunks
                entities_total += entities_prob
                if r.status == world.REGION_TOO_SMALL:
                    too_small_total += 1
                elif r.status == world.REGION_UNREADABLE:
                    unreadable += 1
                region_counter += 1
                if options.verbose:
                    if r.status == world.REGION_OK:
                        stats = "(c: {0}, w: {1}, tme: {2}, so: {3}, t: {4})".format(corrupted, wrong, entities_prob, shared_offset, num_chunks)
                    elif r.status == world.REGION_TOO_SMALL:
                        stats = "(Error: not a region file)"
                    elif r.status == world.REGION_UNREADABLE:
                        stats = "(Error: unreadable region file)"
                    print("Scanned {0: <12} {1:.<43} {2}/{3}".format(filename, stats, region_counter, total_regions))
                else:
                    pbar.update(region_counter)

    if not options.verbose: pbar.finish()

    regionset.scanned = True
Author: delve, Project: Minecraft-Entity-Finder, Lines: 76, Source: scan.py
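The structure of scan_regionset — pool.map_async for the work plus a SimpleQueue handed to workers through the pool initializer for streaming results back — generalizes well. A stripped-down, runnable sketch under our own names (_square, scan) rather than the project's:

import multiprocessing
import time
from multiprocessing import SimpleQueue

_queue = None  # set inside each worker by _init

def _init(queue):
    global _queue
    _queue = queue

def _square(item):
    _queue.put(item * item)  # stream each result back immediately

def scan(items, processes=4):
    q = SimpleQueue()
    pool = multiprocessing.Pool(processes, initializer=_init, initargs=(q,))
    result = pool.map_async(_square, items)
    # Poll until the pool is done AND the queue is drained; checking
    # only result.ready() could drop results still in the pipe.
    while not result.ready() or not q.empty():
        time.sleep(0.01)
        if not q.empty():
            print('result:', q.get())
    pool.close()
    pool.join()

if __name__ == '__main__':
    scan(range(10))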

Example 9: learning_loop

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue,
                  parsrs: List[Callable]):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.01)
        if exit_flag.value == 0:
            if locks:
                print('gvf gm a 1 a')
                gmlock.acquire()
                print('gvf gm a 1 b')
            action, action_prob, obs, x = main2gvf.get()
            if locks:
                print('gvf gm r 1 a')
                gmlock.release()
                print('gvf gm r 1 b')

    # main loop
    # tt = 0
    # ts = []
    while exit_flag.value == 0:
        # ts.append(time.time() - tt) if tt > 0 else None
        # print(np.mean(ts))
        # tt = time.time()
        if locks:
            print('gvf gm a 2 a')
            gmlock.acquire()
            print('gvf gm a 2 b')
        while exit_flag.value == 0 and main2gvf.empty():
            if locks:
                print('gvf gm r 2 a')
                gmlock.release()
                print('gvf gm r 2 b')
            time.sleep(0.01)
            if locks:
                print('gvf gm a 3 a')
                gmlock.acquire()
                print('gvf gm a 3 b')
        if locks:
            print('gvf gm r 3 a')
            gmlock.release()
            print('gvf gm r 3 b')
        if exit_flag.value:
            break

        # get data from servos
        if locks:
            print('gvf gm a 4 a')
            gmlock.acquire()
            print('gvf gm a 4 b')
        actionp, action_probp, obsp, xp = main2gvf.get()
        if locks:
            print('gvf gm r 4 a')
            gmlock.release()
            print('gvf gm r 4 b')
        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi)

        # send data to plots
        gdata = [g.data(xi, obs, action, xpi, obsp)
                 for gs, xi, xpi in zip(gvfs, x, xp)
                 for g in gs]

        data = dict(ChainMap(*gdata))
        data['obs'] = obs
        data['x'] = x
        data = [parse(data) for parse in parsrs]
        if locks:
            print('gvf gp a 1 a')
            gplock.acquire()
            print('gvf gp a 1 b')
        # data = np.copy(data)
        gvf2plot.put(data)
        if locks:
            print('gvf gp r 1 a')
            gplock.release()
            print('gvf gp r 1 b')

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 94, Source: final_project.py

Example 10: run_clients

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def run_clients(options, db_table_set):
    # Spawn one client for each db.table
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)

    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = [ ]

    try:
        sizes = get_all_table_sizes(options["host"], options["port"], options["auth_key"], db_table_set)

        progress_info = []

        arg_lists = []
        for db, table in db_table_set:
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
                                  multiprocessing.Value(ctypes.c_longlong, sizes[(db, table)])))
            arg_lists.append((options["host"],
                              options["port"],
                              options["auth_key"],
                              db, table,
                              options["directory_partial"],
                              options["fields"],
                              options["delimiter"],
                              options["format"],
                              error_queue,
                              progress_info[-1],
                              sindex_counter,
                              exit_event))


        # Wait for all tables to finish
        while len(processes) > 0 or len(arg_lists) > 0:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set() # Stop rather immediately if an error occurs
                errors.append(error_queue.get())

            processes = [process for process in processes if process.is_alive()]

            if len(processes) < options["clients"] and len(arg_lists) > 0:
                processes.append(multiprocessing.Process(target=export_table,
                                                         args=arg_lists.pop(0)))
                processes[-1].start()

            update_progress(progress_info)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set():
            print_progress(1.0)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        print("")
        print("%s exported from %s, with %s" %
              (plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
               plural(len(db_table_set), "table", "tables"),
               plural(sindex_counter.value, "secondary index", "secondary indexes")))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options["debug"]:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Author: HiroIshikawa, Project: 21playground, Lines: 80, Source: _export.py

Example 11: SimpleQueue(

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
        for tax in taxa_range:
            for mdl in models:
                for gbr in gene_branch_len:
                    for gstdv in gene_branch_stdev:
                        for sbr in species_branch_len:
                            for sstdv in species_branch_stdev:
                                for alp in alphas:
                                    for cat in category_range:
                                        for drp in drop_chances:
                                            for ndr in num_drops_range:
                                                for dup in duplication_chances:
                                                    for ndp in num_duplications_range:
                                                        arguments.append((grp, tax, mdl, gbr, gstdv, sbr, sstdv,
                                                                          alp, cat, drp, ndr, dup, ndp))

    arguments *= in_args.replicates
    broker_queue = SimpleQueue()
    broker = Process(target=broker_func, args=[broker_queue, in_args.output])
    broker.daemon = True
    broker.start()

    run_multicore_function(arguments, generate)

    os.remove("site_rates_info.txt")
    os.remove("site_rates.txt")

    while not broker_queue.empty():
        pass

    broker.terminate()
Author: biologyguy, Project: RD-MCL, Lines: 32, Source: evolution_sim.py
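Two caveats about the final spin-wait above (our observation, not the project's): `while not broker_queue.empty(): pass` burns a full CPU core, and empty() turning True does not guarantee the broker finished handling the last item before terminate() kills it. A common alternative is a sentinel plus join(); a sketch assuming the broker loops on get():

from multiprocessing import Process, SimpleQueue

def broker_func(queue, output_path):
    # Hypothetical broker: consume until the producer sends a sentinel.
    with open(output_path, 'w') as out:
        while True:
            item = queue.get()
            if item is None:  # sentinel: no more work coming
                break
            out.write(str(item) + '\n')

if __name__ == '__main__':
    queue = SimpleQueue()
    broker = Process(target=broker_func, args=(queue, 'results.txt'))
    broker.start()
    for item in range(100):
        queue.put(item)
    queue.put(None)  # tell the broker to finish...
    broker.join()    # ...and wait, instead of polling empty()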

Example 12: InternalClient

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
class InternalClient(Client):
    """This client is a fake client which is responsible for firing off
    all messages from the update notification side, and handling the
    routing of those messages to users watching.

    It does not have a socket, so it should not be included in the
    server's clients dictionary.
    """

    def __init__(self, server, nickname, user, host='localhost'):
        self.server = server
        self.nickname = nickname
        self.realname = nickname
        self.user = user
        self.host = host

        self._readbuffer = ""
        self._writebuffer = ""
        self.request_queue = SimpleQueue()
        self.response_queue = SimpleQueue()

        # dict of board => list of users
        self.board_watchers = defaultdict(list)

        # dict of board, thread => list of users
        self.thread_watchers = defaultdict(lambda: defaultdict(list))

        Process(
            target=Ami,
            name='immediate api worker',
            args=(self.request_queue, self.response_queue)
        ).start()

    def loop_hook(self):
        while not self.response_queue.empty():
            result = self.response_queue.get()

            # Handle exceptions in-band from child workers here.
            if isinstance(result, StoredException):
                print(result.traceback)
                raise RuntimeError(
                    "Exception caught from worker '{}', see above for exception details".format(
                        result.process,
                ))

            logger.debug("read from response queue {}".format(result))

            send_as = "/{}/{}".format(result.board, result.post_no)

            # Initial channel loads have identifiers, use them to find out
            # where to go
            if result.identifier:
                client, channel, target = result.identifier
                client = self.server.get_client(client)
                logger.debug("initial channel load, using identitifier info: sending to {} on {}".format(client, channel))

                if isinstance(target, BoardTarget):
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )
                    continue
                elif isinstance(target, ThreadTarget):
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
                    continue

            if result.is_reply:  # Send to thread channel
                channel = "#/{}/{}".format(result.board, result.reply_to)
                logger.debug("sending reply to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.thread_watchers[result.board][result.reply_to]:
                    logger.debug("sending reply to {}".format(client))
                    self._send_message(
                        client, channel, result.comment,
                        sending_nick=send_as,
                    )
            else:
                channel = "#/{}/".format(result.board)
                logger.debug("sending thread update to channel {}".format(channel))

                # TODO: Remove users who have disconnected from the server here
                for client in self.board_watchers[result.board]:
                    self._send_message(
                        client, channel, result.summary,
                        sending_nick=send_as,
                    )

    def _parse_prefix(self, prefix):
        m = re.search(
            ":(?P<nickname>[^!]*)!(?P<username>[^@]*)@(?P<host>.*)",
            prefix
        )
        return m.groupdict()

    @property
    def socket(self):
#......... (rest of the code omitted) .........
Author: ATRAN2, Project: Futami, Lines: 103, Source: client.py

Example 13: run_clients

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
def run_clients(options, workingDir, db_table_set):
    # Spawn one client for each db.table, up to options.clients at a time
    exit_event = multiprocessing.Event()
    processes = []
    error_queue = SimpleQueue()
    interrupt_event = multiprocessing.Event()
    sindex_counter = multiprocessing.Value(ctypes.c_longlong, 0)
    hook_counter = multiprocessing.Value(ctypes.c_longlong, 0)
    
    signal.signal(signal.SIGINT, lambda a, b: abort_export(a, b, exit_event, interrupt_event))
    errors = []

    try:
        progress_info = []
        arg_lists = []
        for db, table in db_table_set:
            
            tableSize = int(options.retryQuery("count", query.db(db).table(table).info()['doc_count_estimates'].sum()))
            
            progress_info.append((multiprocessing.Value(ctypes.c_longlong, 0),
                                  multiprocessing.Value(ctypes.c_longlong, tableSize)))
            arg_lists.append((db, table,
                              workingDir,
                              options,
                              error_queue,
                              progress_info[-1],
                              sindex_counter,
                              hook_counter,
                              exit_event,
                              ))


        # Wait for all tables to finish
        while processes or arg_lists:
            time.sleep(0.1)

            while not error_queue.empty():
                exit_event.set() # Stop immediately if an error occurs
                errors.append(error_queue.get())

            processes = [process for process in processes if process.is_alive()]

            if len(processes) < options.clients and len(arg_lists) > 0:
                newProcess = multiprocessing.Process(target=export_table, args=arg_lists.pop(0))
                newProcess.start()
                processes.append(newProcess)

            update_progress(progress_info, options)

        # If we were successful, make sure 100% progress is reported
        # (rows could have been deleted which would result in being done at less than 100%)
        if len(errors) == 0 and not interrupt_event.is_set() and not options.quiet:
            utils_common.print_progress(1.0, indent=4)

        # Continue past the progress output line and print total rows processed
        def plural(num, text, plural_text):
            return "%d %s" % (num, text if num == 1 else plural_text)

        if not options.quiet:
            print("\n    %s exported from %s, with %s, and %s" %
                  (plural(sum([max(0, info[0].value) for info in progress_info]), "row", "rows"),
                   plural(len(db_table_set), "table", "tables"),
                   plural(sindex_counter.value, "secondary index", "secondary indexes"),
                   plural(hook_counter.value, "hook function", "hook functions")
            ))
    finally:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if interrupt_event.is_set():
        raise RuntimeError("Interrupted")

    if len(errors) != 0:
        # multiprocessing queues don't handle tracebacks, so they've already been stringified in the queue
        for error in errors:
            print("%s" % error[1], file=sys.stderr)
            if options.debug:
                print("%s traceback: %s" % (error[0].__name__, error[2]), file=sys.stderr)
        raise RuntimeError("Errors occurred during export")
Author: AtnNn, Project: rethinkdb, Lines: 80, Source: _export.py

Example 14: AsyncScanner

# Required import: from multiprocessing import SimpleQueue [as alias]
# Or: from multiprocessing.SimpleQueue import empty [as alias]
class AsyncScanner(object):
    """ Class to derive all the scanner classes from.

    To implement a scanner you have to override:
    update_str_last_scanned()

    Use try-finally to call terminate(); otherwise child processes
    will be left hanging in the background.
    """
    def __init__(self, data_structure, processes, scan_function, init_args,
                 _mp_init_function):
        """ Init the scanner.

        data_structure is a world.DataSet
        processes is the number of child processes to use
        scan_function is the function to use for scanning
        init_args are the arguments passed to the init function
        _mp_init_function is the function used to init the child processes
        """
        assert(isinstance(data_structure, world.DataSet))
        self.data_structure = data_structure
        self.list_files_to_scan = data_structure._get_list()
        self.processes = processes
        self.scan_function = scan_function

        # Queue used by processes to pass results
        self.queue = SimpleQueue()
        init_args.update({'queue': self.queue})
        # NOTE TO SELF: initargs doesn't handle kwargs, only args!
        # Pass a dict with all the args
        self.pool = multiprocessing.Pool(processes=processes,
                initializer=_mp_init_function,
                initargs=(init_args,))

        # Recommended time to sleep between polls for results
        self.SCAN_START_SLEEP_TIME = 0.001
        self.SCAN_MIN_SLEEP_TIME = 1e-6
        self.SCAN_MAX_SLEEP_TIME = 0.1
        self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
        self.queries_without_results = 0
        self.last_time = time()
        self.MIN_QUERY_NUM = 1
        self.MAX_QUERY_NUM = 5

        # Holds a friendly string with the name of the last file scanned
        self._str_last_scanned = None

    def scan(self):
        """ Launch the child processes and scan all the files. """
        
        logging.debug("########################################################")
        logging.debug("########################################################")
        logging.debug("Starting scan in: " + str(self))
        logging.debug("########################################################")
        logging.debug("########################################################")
        # Tests indicate that smaller amount of jobs per worker make all type
        # of scans faster
        jobs_per_worker = 5
        #jobs_per_worker = max(1, total_files // self.processes)
        self._results = self.pool.map_async(self.scan_function,
                                            self.list_files_to_scan,
                                            jobs_per_worker)
                                            
        # No more tasks to the pool, exit the processes once the tasks are done
        self.pool.close()

        # See method
        self._str_last_scanned = ""

    def get_last_result(self):
        """ Return results of last file scanned. """

        q = self.queue
        ds = self.data_structure
        if not q.empty():
            d = q.get()
            if isinstance(d, tuple):
                self.raise_child_exception(d)
            # Copy it to the father process
            ds._replace_in_data_structure(d)
            ds._update_counts(d)
            self.update_str_last_scanned(d)
            # Got result! Reset it!
            self.queries_without_results = 0
            return d
        else:
            # Count amount of queries without result
            self.queries_without_results += 1
            return None

    def terminate(self):
        """ Terminate the pool, this will exit no matter what.
        """
        self.pool.terminate()

    def raise_child_exception(self, exception_tuple):
        """ Raises a ChildProcessException using the info
        contained in the tuple returned by the child process. """
        e = exception_tuple
        raise ChildProcessException(e[0], e[1][0], e[1][1], e[1][2])

#......... (rest of the code omitted) .........
Author: Fenixin, Project: Minecraft-Region-Fixer, Lines: 103, Source: scan.py
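Because get_last_result() returns None instead of blocking, a caller can tune its own polling rate using the sleep-time fields defined in __init__. The class body above is truncated, so the driver below is a hypothetical sketch that assumes only scan(), get_last_result(), terminate(), and the attributes shown:

import time

def run_scan(scanner, total_files):
    # Drive an AsyncScanner, backing off while the queue stays empty.
    scanner.scan()
    scanned = 0
    try:
        while scanned < total_files:
            if scanner.get_last_result() is not None:
                scanned += 1
                scanner.scan_sleep_time = max(scanner.scan_sleep_time / 2,
                                              scanner.SCAN_MIN_SLEEP_TIME)
            else:
                scanner.scan_sleep_time = min(scanner.scan_sleep_time * 2,
                                              scanner.SCAN_MAX_SLEEP_TIME)
            time.sleep(scanner.scan_sleep_time)
    finally:
        scanner.terminate()  # the docstring warns to always call this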


Note: The multiprocessing.SimpleQueue.empty examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective programmers, and copyright in the source code remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.