

Python Event.set Method Code Examples

This article compiles typical usage examples of the Python multiprocessing.Event.set method. If you are unsure what Event.set does or how to call it, the curated examples below may help. You can also explore further usage examples of the multiprocessing.Event class to which this method belongs.


The following shows 15 code examples of the Event.set method, sorted by popularity by default.
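As a quick orientation before the collected examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the basic set/wait/is_set pattern between a parent and a child process:

from multiprocessing import Process, Event
import time

def waiter(ready):
    ready.wait()  # block until the parent calls ready.set()
    print("child: event was set, continuing")

if __name__ == "__main__":
    ready = Event()
    p = Process(target=waiter, args=(ready,))
    p.start()
    time.sleep(0.5)  # simulate setup work in the parent
    ready.set()      # wake the waiting child
    p.join()
    print("parent: ready.is_set() ->", ready.is_set())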

Example 1: StoppableProcess

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class StoppableProcess(Process):
    exit = None
    sleep = None

    def __init__(self, sleep=1, *args, **kwargs):
        self.exit = Event()
        self.sleep = sleep
        super(StoppableProcess, self).__init__(*args, **kwargs)

    def _setup(self):
        pass

    def _teardown(self):
        pass

    def _ping(self):
        raise NotImplementedError

    def _should_exit(self):
        return self.exit.wait(0)

    def run(self):
        self._setup()
        while True:
            if self._ping() or self.exit.wait(self.sleep * 1.0):
                self._teardown()
                return

    def stop(self):
        self.exit.set()
        self.join(self.sleep)
        if self.is_alive():
            self.terminate()
Developer: c17r, Project: tic-tweet-toe, Lines: 35, Source file: stoppable_process.py
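A hedged usage sketch, not taken from the tic-tweet-toe project: assuming the StoppableProcess class above is defined in the current module, a hypothetical Heartbeat subclass only needs to implement _ping.

import time

class Heartbeat(StoppableProcess):  # hypothetical subclass, for illustration only
    def _ping(self):
        print("ping")
        return False  # returning a truthy value would end the run loop

if __name__ == "__main__":
    hb = Heartbeat(sleep=1)
    hb.start()
    time.sleep(3)
    hb.stop()  # sets the exit Event, joins, and terminates if still alive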

Example 2: MistProcess

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class MistProcess(Process):

    def __init__(self, gpio, sleep=1, name='MistProcess'):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.name = name
        self.gpio = gpio
        self.sleep = sleep
        self.mist = mraa.Gpio(self.gpio)
        self.mist.dir(mraa.DIR_OUT)

    def _mist_on(self):
        self.logger.debug('Mist on')
        self.mist.write(1)

    def _mist_off(self):
        self.logger.debug('Mist off')
        if self.mist:
            self.mist.write(0)

    def run(self):
        self.event.set()
        self.logger.debug('PID: %d' % multiprocessing.current_process().pid)

        while self.event.is_set():
            self._mist_on()
            time.sleep(self.sleep)

    def stop(self):
        self.logger.debug('Process {} will halt.'.format(self.name))
        self.event.clear()
        self._mist_off()
Developer: GaragemHacker, Project: curytybainbox, Lines: 35, Source file: mist.py

Example 3: Updater

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print('Terminating updater:', self)
            self.terminate()

    def receive(self):
        raise NotImplementedError
Developer: KN2C, Project: pyroboime, Lines: 33, Source file: updater.py
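A minimal usage sketch, assuming the Updater class above is importable; RandomUpdater and its receive implementation are hypothetical stand-ins for a real data source.

import random
import time

class RandomUpdater(Updater):  # hypothetical subclass, for illustration only
    def receive(self):
        time.sleep(0.1)
        return random.random()  # stand-in for real incoming data

if __name__ == "__main__":
    u = RandomUpdater()
    u.start()
    time.sleep(1)
    print(u.queue.get())  # consume one item placed by the child process
    u.stop()  # sets _exit and terminates the process if it is still alive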

Example 4: test_sentinel

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Developer: 277800076, Project: django-q, Lines: 9, Source file: test_cluster.py

Example 5: test_cluster

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
def test_cluster(r):
    list_key = 'cluster_test:q'
    r.delete(list_key)
    task = async('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, list_key=list_key)
    assert queue_size(list_key=list_key, r=r) == 1
    task_queue = Queue()
    assert task_queue.qsize() == 0
    result_queue = Queue()
    assert result_queue.qsize() == 0
    event = Event()
    event.set()
    # Test push
    pusher(task_queue, event, list_key=list_key, r=r)
    assert task_queue.qsize() == 1
    assert queue_size(list_key=list_key, r=r) == 0
    # Test work
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    # Test monitor
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check result
    assert result(task) == 1506
    r.delete(list_key)
Developer: sebasmagri, Project: django-q, Lines: 29, Source file: test_cluster.py

Example 6: recog_proc

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
    def recog_proc(self, child_recog: Pipe, e_recog: Event, yolo_type: str):
        """
        Parallel process for object recognition

        Arguments:
            child_recog {Pipe} -- pipe for communication with parent process,
                sends bbox yolo type of recognized object
            e_recog {Event} -- event for indicating complete recognize in frame
        """

        # initialize YOLO
        yolo = Yolo(yolo_type)
        e_recog.set()
        print("yolo defined")

        while True:
            frame = child_recog.recv()
            print("recog process frame recieved")
            if frame is None:
                print("FRAME NONE? R U SURE ABOUT THAT?!")
                return
            res = yolo.detect(frame, cvmat=True)
            print("recog send")
            e_recog.set()
            child_recog.send(res)
Developer: DiggiDon, Project: Tracking_system, Lines: 27, Source file: track_system.py

Example 7: DataLoaderOnTheFly

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count = 4, limit_batch_count = None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize = 10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
            self.batch_list = []
            self.index = -1
        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()
Developer: WarBean, Project: MLUtil, Lines: 32, Source file: data_loader.py

Example 8: StoppableProcess

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class StoppableProcess(Process):
    """ Base class for Processes which require the ability
    to be stopped by a process-safe method call
    """

    def __init__(self):
        self._should_stop = Event()
        self._should_stop.clear()
        super(StoppableProcess, self).__init__()

    def join(self, timeout=0):
        """ Joins the current process and forces it to stop after
        the timeout if necessary

        :param timeout: Timeout duration in seconds
        """
        self._should_stop.wait(timeout)
        if not self.should_stop():
            self.stop()
        super(StoppableProcess, self).join(0)

    def stop(self):
        self._should_stop.set()

    def should_stop(self):
        return self._should_stop.is_set()

    def __repr__(self):
        return "<%s(should_stop=%s)>" % (
            self.__class__.__name__, self.should_stop())
Developer: ddale, Project: pymeasure, Lines: 32, Source file: process.py
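A short usage sketch for this variant, assuming the class above is in scope; the Ticker subclass and its loop are hypothetical.

import time

class Ticker(StoppableProcess):  # hypothetical subclass, for illustration only
    def run(self):
        while not self.should_stop():  # poll the shared Event
            print("tick")
            time.sleep(0.5)

if __name__ == "__main__":
    t = Ticker()
    t.start()
    time.sleep(2)
    t.stop()   # sets _should_stop so the loop exits
    t.join(1)  # the overridden join() waits on the Event, then joins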

Example 9: SharedFile

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class SharedFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.fevent = Event()
        # self.state = Value('i', 0)
        self.fevent.set()

    def write(self, mode, data):
        # print("Write {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, mode)
        f.write(data)
        f.close()
        self.unlock()

    def read(self):
        # print("Read {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, 'r')
        data = f.read()
        f.close()
        self.unlock()
        return data

    def wait_freedom_and_lock(self):
        self.fevent.wait()
        self.fevent.clear()
        # return

    def unlock(self):
        self.fevent.set()
Developer: thomasfire, Project: agent_smith, Lines: 35, Source file: multiio.py
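A small usage sketch, assuming the SharedFile class above; the file name and the writer helper are placeholders.

from multiprocessing import Process

def writer(shared, text):
    shared.write('a', text)  # appends under the Event-based lock

if __name__ == "__main__":
    shared = SharedFile('demo_shared.txt')  # hypothetical file name
    procs = [Process(target=writer, args=(shared, "line %d\n" % i)) for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(shared.read())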

Example 10: __init__

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class Transcoder:
    def __init__(self):
        self.stopping = Event()

    def stop(self):
        logger.debug("Preventing new transcoding processes.")
        self.stopping.set()

    def transcode(self, path, format='mp3', bitrate=False):
        if self.stopping.is_set():
            return
        try:
            stop = Event()
            start_time = time.time()
            parent_conn, child_conn = Pipe()
            process = Process(target=transcode_process,
                    args=(child_conn, path, stop, format, bitrate))
            process.start()
            while not (self.stopping.is_set() or stop.is_set()):
                data = parent_conn.recv()
                if not data:
                    break
                yield data
            logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
        except GeneratorExit:
            stop.set()
            logger.debug("User canceled the request during transcoding.")
        except:
            stop.set()
            logger.warn("Some type of error occured during transcoding.")
        finally:
            parent_conn.close()
            process.join()
Developer: daveisadork, Project: Blofeld, Lines: 35, Source file: transcode.py

Example 11: BaseWorker

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class BaseWorker(Process):
    def __init__(self, *args, **kwargs):
        super(BaseWorker, self).__init__(*args, **kwargs)
        self.should_exit = Event()

    def shutdown(self):
        self.should_exit.set()
Developer: jpulec, Project: PyQS, Lines: 9, Source file: worker.py
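A hedged sketch of how such a worker might be driven; the polling run loop below is an assumption for illustration and is not part of the PyQS worker itself.

import time

class PrintWorker(BaseWorker):  # hypothetical subclass, for illustration only
    def run(self):
        while not self.should_exit.is_set():
            print("working...")
            time.sleep(0.5)

if __name__ == "__main__":
    w = PrintWorker()
    w.start()
    time.sleep(2)
    w.shutdown()  # sets should_exit; the loop then ends
    w.join()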

Example 12: runscripts

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
    def runscripts(self,team):
        ip = team['ip']
        if team['team_id'] not in self.run_list:
            self.log.info('No script to run for %s.'%(str(team)))
            return

        self.status['script_tot'] = self.status['script_tot'] + len(self.run_list[team['team_id']])

        #sort by service
        ss = {}
        for sid in self.run_list[team['team_id']]:
            sid = int(sid)
            s = self.scripts[sid]
            srvid = s['service_id']
            if srvid in ss:
                ss[srvid].append(sid)
            else:
                ss[srvid] = [sid]
        
        #randomize delay
        for srvid, slist in ss.items():
            #per service
            slock = Event()
            slock.set()
            self.locks.append(slock)
            rlist = self.get_rand_delay(slist)
            for sid,delay in rlist:
                p = self.update_script(team['team_id'],sid,self.scripts[sid]['is_bundle'])
                if p is None:
                    continue
                s = self.scripts[sid]
                self.runscript(slock,team['team_id'],sid,s['service_id'],SCRIPT_TIMEOUT,s['type'],p,ip,self.services[s['service_id']]['port'],delay)
Developer: a0xnirudh, Project: ictf-framework, Lines: 34, Source file: scorebot.py

Example 13: CaptureProcess

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class CaptureProcess(Process):
    """A process that fills a queue with images as captured from 
    a camera feed"""
    def __init__(self, capture, imageQueue):
        Process.__init__(self, name="Capture")
        self.imageQueue = imageQueue
        self.capture = capture
        self.keepGoing = Event()
        self.keepGoing.set()
        self.daemon = True

    def run(self):
        print "CaptureProcess pid: %s" % (self.pid,)
        while self.keepGoing.is_set():
            image = captureImage(self.capture)
#            sys.stdout.write(".")
            try:
                self.imageQueue.put(serializeImage(image), block=True, timeout=0.25)
            except FullException:
                try:
                    _ = self.imageQueue.get_nowait()
                except:
                    pass  # Try to clear the queue, but don't worry if someone snatches it first
    def stop(self):
        self.keepGoing.clear()
Developer: jbrowne, Project: UCSBsketch, Lines: 27, Source file: ContinuousCapture.py

Example 14: DataProcess

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
class DataProcess(Process):
    def __init__(self, data_pipeline, **get_batch_kwargs):
        super(DataProcess, self).__init__(name='neuralnilm-data-process')
        self._stop = Event()
        self._queue = Queue(maxsize=3)
        self.data_pipeline = data_pipeline
        self._get_batch_kwargs = get_batch_kwargs

    def run(self):
        batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)
        while not self._stop.is_set():
            try:
                self._queue.put(batch)
            except AssertionError:
                # queue is closed
                break
            batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)

    def get_batch(self, timeout=30):
        if self.is_alive():
            return self._queue.get(timeout=timeout)
        else:
            raise RuntimeError("Process is not running!")

    def stop(self):
        self._stop.set()
        self._queue.close()
        self.terminate()
        self.join()
Developer: asez73, Project: neuralnilm, Lines: 31, Source file: dataprocess.py
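A usage sketch under stated assumptions: FakePipeline is a hypothetical stand-in for a real data pipeline and only needs a get_batch method matching the keyword arguments passed to DataProcess.

import random

class FakePipeline(object):  # hypothetical pipeline, for illustration only
    def get_batch(self, size=4):
        return [random.random() for _ in range(size)]

if __name__ == "__main__":
    dp = DataProcess(FakePipeline(), size=4)
    dp.start()
    print(dp.get_batch())  # pulls one batch from the internal queue
    dp.stop()  # sets _stop, closes the queue, terminates and joins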

Example 15: run

# Required import: from multiprocessing import Event [as alias]
# Or: from multiprocessing.Event import set [as alias]
    def run(self):
        logger = self.ipc_logger()
        input_queue = Queue(20 * self.n_processes)
        done_event = Event()
        processes = [
            ProteinDigestingProcess(
                self.connection, self.hypothesis_id, input_queue,
                self.digestor, done_event=done_event,
                message_handler=logger.sender()) for i in range(
                self.n_processes)
        ]
        protein_ids = self.protein_ids
        i = 0
        n = len(protein_ids)
        chunk_size = 2
        interval = 30
        for process in processes:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            process.start()

        last = i
        while i < n:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            if i - last > interval:
                self.log("... Dealt Proteins %d-%d %0.2f%%" % (
                    i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
                last = i

        done_event.set()
        for process in processes:
            process.join()
        logger.stop()
Developer: mobiusklein, Project: glycan_profiling, Lines: 36, Source file: peptide_permutation.py
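The pattern in Example 15 (deal work chunks through a queue, then announce completion with done_event.set()) reduces to the following generic, self-contained sketch; chunk_worker and the chunk size are illustrative only.

from multiprocessing import Process, Queue, Event
from queue import Empty

def chunk_worker(q, done):
    while True:
        try:
            chunk = q.get(timeout=0.5)
        except Empty:
            if done.is_set():  # producer finished and the queue is drained
                break
            continue
        print("processed chunk sum:", sum(chunk))

if __name__ == "__main__":
    q, done = Queue(), Event()
    workers = [Process(target=chunk_worker, args=(q, done)) for _ in range(2)]
    for w in workers:
        w.start()
    items = list(range(100))
    for i in range(0, len(items), 10):
        q.put(items[i:i + 10])  # deal out chunks of work
    done.set()  # signal that no more chunks are coming
    for w in workers:
        w.join()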


Note: The multiprocessing.Event.set examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their original authors. Copyright of the source code remains with the original authors; when distributing or using it, please refer to the corresponding project's license. Do not reproduce without permission.