

Python Queue.close Method Code Examples

This article collects and summarizes typical usage examples of the Queue.close method from Python's multiprocessing.queues module. If you are wondering what exactly Queue.close does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, multiprocessing.queues.Queue.


The sections below show 12 code examples of Queue.close, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
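
Before diving into the examples, here is a minimal, self-contained sketch of the usual close()/join_thread() shutdown sequence. This sketch is an illustration added for orientation, not taken from any of the projects below, and it uses Python 3 syntax, whereas the collected examples are older Python 2 code:

from multiprocessing import Process, Queue

def producer(q):
    # Put a few items on the queue, then exit.
    for i in range(5):
        q.put(i)

if __name__ == "__main__":
    q = Queue()
    p = Process(target=producer, args=(q,))
    p.start()
    results = [q.get() for _ in range(5)]  # drain the queue before closing
    p.join()
    q.close()        # this process will put no more data on the queue
    q.join_thread()  # wait for the feeder thread to flush any buffered data
    print(results)

Note that join_thread() may only be called after close(), and close() does not discard data already buffered; it only marks the queue as finished from the calling process's side.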

Example 1: TestBlockingMethods

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class TestBlockingMethods(unittest.TestCase):
    def setUp(self):
        self.quiet=True
        self.random = Random()
        self._timers = []
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="test")
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       handlerClazz=Linkage.create(MockHandler),
                                       quiet=self.quiet
#           includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
        self.r.start().waitUntilRunning()
        pass
    def tearDown(self):
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Developer: sys-git, Project: PyRQ, Lines of code: 32, Source file: TestQueueBlocking.py

Example 2: Multiprocess

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class Multiprocess(object):
    # THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
    # A CENTRAL POINT


    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)


    def __enter__(self):
        return self

    #WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
    def __exit__(self, a, b, c):
        try:
            self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
        except Exception, e:
            Log.warning("Problem adding to inbound", e)

        self.join()
Developer: klahnakoski, Project: Bugzilla-ETL, Lines of code: 37, Source file: multiprocess.py

Example 3: test_multiprocess_tasks

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
def test_multiprocess_tasks():
    wait_until_convenient()
    TAG = "message_q"
    def fetch_task(queue):
        pid = os.getpid()
        count = 0
        for dq in q.listen(TAG, timeout=1):
            s = { 'pid': pid, 'data': dq }
            if dq:
                count += 1
                queue.put(s)
                sleep(uniform(0.1, 0.5)) # sleep 0.1~0.5 seconds randomly
            elif q.count(TAG) == 0:
                return count # the number of tasks done by this process


    test_items = range(0, 10000) # enqueue 10000 tasks
    for i in test_items:
        q.enqueue(TAG, i + 1)

    while q.count(TAG) != len(test_items): # wait until test data is ready
        wait_until_convenient()

    jobs = []
    wait_until_convenient()
    queue = Queue()
    start = timer()
    num_p = 30 # the number of processes to use
    for i in range(0, num_p):
        job = Process(target=fetch_task, args=(queue,))
        jobs.append(job)
        job.start() # start task process

    remaining = q.count(TAG)
    while remaining > 0: # wait until the queue is consumed completely
        remaining = q.count(TAG)
        sys.stdout.write('\rRunning test_multiprocess_tasks - remaining %5d/%5d' % (remaining, len(test_items),))
        sys.stdout.flush()
        wait_until_convenient()

    processed_data = set()
    qsize = 0
    while not queue.empty():
        item = queue.get()
        data = item.get('data')
        qsize += 1
        assert data not in processed_data, "failed test_multiprocess_tasks - data %s has been processed already" % (data, )
        processed_data.add(item.get('data'))

    queue.close()
    queue.join_thread()
    for j in jobs:
        j.join()

    assert qsize == len(test_items), "failed test_multiprocess_tasks - tasks are not complete %d/%d" % (qsize, len(test_items), )
    end = timer()
    print("\rOK test_multiprocess_tasks - %d done in %5d seconds" % (qsize, end - start))
Developer: smihica, Project: py-q4pg, Lines of code: 59, Source file: test.py

Example 4: ServerSink

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class ServerSink(iMockDebuggerSink):
    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.quiet= quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None
    def start(self):
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("ServerSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "server.sink.started"
    def close(self):
        self._terminate = True
        try:    self.thread.join()
        except: pass
        try:    self._qw.close()
        except: pass
        try:    self._q.close()
        except: pass
        return "server.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(*args, **kwargs):
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:   pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception, _e:
                    break
Developer: sys-git, Project: PyRQ, Lines of code: 57, Source file: ServerSink.py

Example 5: TestRRQReader

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class TestRRQReader(_BaseReader):
    def setUp(self):
        self.logger = self._getLogger()
        self.quiet = True
        self._queues = []
        self.random = Random()
        self.timerTerminate = 0
        self._timers = []
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="default", loggingModule=testLoggingModule)
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       quiet=self.quiet,
                                       handlerClazz=Linkage.create(MockHandler),
#           includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
        self.r.start().waitUntilRunning()
        pass
    def tearDown(self):
        self.logger.info("------------ TEST END ------------")
        self.timerTerminate = 1
        time.sleep(2)
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Developer: sys-git, Project: PyRQ, Lines of code: 38, Source file: TestRRQReader.py

Example 6: TestWithTimeouts

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class TestWithTimeouts(_BaseReader):
    def setUp(self):
        self.logger = self._getLogger()
        self.quiet=True
        self.random = Random()
        self.timerTerminate = 0
        self._timers = []
        self.namespaces = []
        self.dummyQueue = Queue()
        self.iface = PyRQIface(ref="test", quiet=self.quiet, loggingModule=testLoggingModule)
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       handlerClazz=Linkage.create(TimeoutMockHandler),
                                       quiet=self.quiet,
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
    def _createInterface(self):
        namespace = self.iface.create()
        self.iface = PyRQIface(namespace=namespace, ref="test", quiet=self.quiet, loggingModule=testLoggingModule)
        self.namespaces.append(namespace)
        return namespace
    def tearDown(self):
        self.timerTerminate = 1
        time.sleep(2)
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception, _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError, _e:
            pass
Developer: sys-git, Project: PyRQ, Lines of code: 38, Source file: TestRRQReader.py

Example 7: FileSink

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class FileSink(iMockDebuggerSink):
    def __init__(self, peerName, theTime, filename, quiet):
        self._peerName = peerName
        self._fp = open(filename, "w")
        self._fp.write("File debugger started at: %(T)s for client: %(C)s"%{"T":theTime, "C":peerName})
        self._fp.flush()
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self.quiet=  quiet
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.thread = None
    def start(self):
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("FileSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "file.sink.started"
    def close(self):
        self._terminate = True
        try:    self.thread.join()
        except: pass
        try:    self._fp.close()
        except: pass
        try:    self._q.close()
        except: pass
        self._fp = None
        return "file.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(*args, **kwargs):
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:   pass
            else:
                try:
                    (methodName, args, kwargs) = data
                    peerName = args[0]
                    relativeTime = args[1]
                    args = args[2:]
                    ss = ["PEER:", peerName, "REL-TIME:", relativeTime, "METHOD", methodName, "ARGS:", str(args), "KWARGS", str(kwargs)]
                    s = "\n".join(ss)
                except: pass
                else:
                    try:
                        self._fp.write(s)
                    except:
                        break
Developer: sys-git, Project: PyRQ, Lines of code: 65, Source file: FileSink.py

Example 8: MultiCoreEngine

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class MultiCoreEngine():
    
    _mapred = None
    
    _out_queue = None
    _in_queue = None
    _log_queue = None
        
    _processes = None
    
        
    def __init__(self,mapred):
        self._mapred = mapred
            
    def _start(self,name,cpu, module_name, class_name, params):
        fn = None
        
        self._processes = []
        self._in_queue = Queue()
        self._out_queue = Queue()
        self._log_queue = Queue()
        
        if name == "mapper":
            fn = q_run_mapper
        elif name == "reducer":
            fn = q_run_reducer
        
        for i in range(cpu):
            process = Process(target=fn,args=(module_name, class_name ,params, self._in_queue, self._out_queue, self._log_queue))
            self._processes.append(process)
            process.start()
    
    def _stop(self):
        
        for process in self._processes:
            self._in_queue.put("STOP")
        
        while not self._log_queue.empty():
            print self._log_queue.get()
    
    def _get_data_chunks(self):
        chunks = []
        for process in self._processes:
            chunks.append(self._out_queue.get())
        
        return chunks
    
    def _set_data_chunks(self, chunks):
        
        map(self._in_queue.put,chunks)
        
                        
    def _send_lines(self,lines, cpu, lines_len ):
        line_splits = [lines[i* lines_len / cpu : (i+1)* lines_len / cpu] for i in range(cpu) ]
                    
        for i in range(cpu): 
            self._in_queue.put(line_splits[i])
    
    def _terminate(self):
        for process in self._processes:
            process.join()
            process.terminate()
                
        self._in_queue.close()
        self._out_queue.close()
        self._processes = None
        
    def _force_terminate(self):
        for process in self._processes:
            process.terminate()
            
    def _merge_data(self, data):
       
        self._mapred.data = merge_kv_dict(self._mapred.data,data)
                
    def _merge_reduced_data(self, data):
       
        self._mapred.data_reduced = merge_kv_dict(self._mapred.data_reduced,data)
                
    def _split_data(self, num_splits):
        splits = []
        index = 0
        
        len_data = len(self._mapred.data)
        
        chunk_len = int(math.ceil(len_data / float(num_splits)))
        
        if chunk_len == 0:
            splits.append(self._mapred.data)
        else:        
            for i in range(int(math.ceil(len_data/float(chunk_len)))):
                splits.append({})
                
            for (key, value) in self._mapred.data.items():
                
                i = int(math.floor(index / float(chunk_len)))
                       
                splits[i][key]=value
                
                index = index + 1
#......... part of the code omitted here .........
Developer: jucabot, Project: polymr, Lines of code: 103, Source file: smp.py

Example 9: SSHClient

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class SSHClient(Process):
    TIMEOUT = 10
    PING_RECEIVED = re.compile("1 received")

    def __init__(self, username, password, host, cmdsToSend, port = 22, exitCmd = "exit", timeout = None):
        Process.__init__(self, name = "SSHClient")

        self.logger = LogManager().getLogger('SSHClient-%s' % host)
        self.username = username
        self.password = password
        self.host = host
        self.port = int(port)
        self.cmdsToSend = cmdsToSend if isinstance(cmdsToSend, list) else [cmdsToSend]
        self.exitCmd = exitCmd

        self.queue = Queue()
        self.msg = ""
        self.status = Status.FAILURE
        self.startTime = Value('d', 0.0)
        self.endTime = Value('d', 0.0)
        self.timeout = timeout or SSHClient.TIMEOUT
        self.cmdsSend = False
        self.start()

    def isFinished(self):
        """ True if the process has finished """
        return not self.is_alive()

    def updateOutput(self):
        """
        Update the msg to include the latest
        output from the given commands
        """
        try:
            while True:
                msg = self.queue.get(timeout = 0.5)
                self.msg += msg
        except Empty: pass
        except IOError: pass

        if self.isFinished():
            self.queue.close()
        return self.msg

    def run(self):
        factory = SSHFactory(self)
        factory.protocol = ClientTransport
        reactor.connectTCP(self.host, self.port, factory)

        self.startTime.value = time.time()
        check = task.LoopingCall(self.__ping)
        check.start(2.0)
        reactor.callLater(self.timeout, self.__timeout)
        log.defaultObserver.stop()
        reactor.run()
        self.endTime.value = time.time()
        self.queue.close()
        sys.exit(self.status)

    def __timeout(self):
        """ Timeout checker """
        if self.status != Status.FAILURE:
            return

        self.logger.error('Connection timeout to peer %s:%s'
            %(self.host, self.port))
        reactor.stop()

    def __ping(self):
        with open('/dev/null') as null:
            ping = subprocess.Popen(["ping", "-c1", "-W1", self.host],
                stdout = null, stderr = null)
            ping.wait()

        if ping.returncode != 0 and reactor.running:
            if self.cmdsSend == False:
                self.status = Status.FAILURE
            reactor.stop() 

    def cleanup(self):
        self.queue.close()

    def shutdown(self):
        """ Terminate the SSH process """
        self.terminate()
        self.join()
        self.endTime.value = time.time()
Developer: sys-git, Project: YATES, Lines of code: 89, Source file: SSH.py

Example 10: PmakeManager

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]

#......... part of the code omitted here .........
    def instance_job(self, job_id):
        publish(self.context, 'worker-status', job_id=job_id,
                status='apply_async')
        assert len(self.sub_available) > 0
        name = sorted(self.sub_available)[0]
        self.sub_available.remove(name)
        assert not name in self.sub_processing
        self.sub_processing.add(name)
        sub = self.subs[name]

        self.job2subname[job_id] = name

        if self.new_process:
            f = parmake_job2_new_process
            args = (job_id, self.context)

        else:
            f = parmake_job2
            args = (job_id, self.context,
                    self.event_queue_name, self.show_output)

        async_result = sub.apply_async(f, args)
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        # print('process_finished()')

        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        # XXX: in practice this never works well
        if False:
            # print('joining')
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)

        # XXX: ... so we just kill them mercilessly
        if True:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

                #print('process_finished() finished')
        
        self.event_queue.close()
        del PmakeManager.queues[self.event_queue_name]
        

    # Normal outcomes    
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
Developer: p-muller, Project: compmake, Lines of code: 104, Source file: pmake_manager.py

Example 11: MVacManager

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]

#......... part of the code omitted here .........
                #     f = parmake_job2_new_process
                #     args = (job_id, self.context)
                # 
                # else:
                f = parmake_job2
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output)
            else:
                f = mvac_job
                args = (job_id, self.context,
                        self.event_queue_name, self.show_output,
                        self.volumes, os.getcwd())
    
        if True:
            async_result = sub.apply_async(f, args)
        else:
            warnings.warn('Debugging synchronously')
            async_result = f(args)
            
        return async_result

    def event_check(self):
        if not self.show_output:
            return
        while True:
            try:
                event = self.event_queue.get(block=False)  # @UndefinedVariable
                event.kwargs['remote'] = True
                broadcast_event(self.context, event)
            except Empty:
                break

    def process_finished(self):
        if self.cleaned:
            return
        self.cleaned = True
        
        #print('Clean up...') 
        
        for name in self.sub_processing:
            self.subs[name].proc.terminate()

        for name in self.sub_available:
            self.subs[name].terminate()

        elegant = False
        # XXX: in practice this never works well
        if elegant:
            timeout = 1
            for name in self.sub_available:
                self.subs[name].proc.join(timeout)
            
        # XXX: ... so we just kill them mercilessly
        else:
            #  print('killing')
            for name in self.sub_processing:
                pid = self.subs[name].proc.pid
                os.kill(pid, signal.SIGKILL)

        self.event_queue.close()
        self.signal_queue.close()
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        del PmakeManager.queues[self.event_queue_name]
        

    # Normal outcomes    
    def job_failed(self, job_id, deleted_jobs):
        Manager.job_failed(self, job_id, deleted_jobs)
        self._clear(job_id)

    def job_succeeded(self, job_id):
        Manager.job_succeeded(self, job_id)
        self._clear(job_id)

    def _clear(self, job_id):
        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)
        self.sub_available.add(name)

    def host_failed(self, job_id):
        Manager.host_failed(self, job_id)

        assert job_id in self.job2subname
        name = self.job2subname[job_id]
        del self.job2subname[job_id]
        del self.subname2job[name]
        assert name in self.sub_processing
        assert name not in self.sub_available
        self.sub_processing.remove(name)

        # put in sub_aborted
        self.sub_aborted.add(name)

    def cleanup(self):
        self.process_finished()
Developer: p-muller, Project: compmake, Lines of code: 104, Source file: mvac_manager.py

Example 12: ApiBase

# Required import: from multiprocessing.queues import Queue [as alias]
# Or: from multiprocessing.queues.Queue import close [as alias]
class ApiBase(iApi, iIpcTransportDataReceiveListener):
    r"""
    @summary: The base-class to the top-level api object ONLY, not sub-apis.
    """
    DEFAULT_MAX_ASYNC_HANDLERS = 1
    def __init__(self, name, ns="", solicited=True, ignoreUnhandled=False, maxAsync=None):
        super(ApiBase, self).__init__(ns=ns, solicited=solicited, name=name)
        self._setup(ns=self._getNamespace(), solicited=self.solicited, ipc=self.ipc)
        self._dataRxCount = itertools.count(0)
        self._ignoreUnhandled = ignoreUnhandled
        if (maxAsync == None) or (maxAsync < 1):
            maxAsync = ApiBase.DEFAULT_MAX_ASYNC_HANDLERS
        self._maxAsync = maxAsync
        self._q = Queue()
        self._workers = []
        self._createAsyncWorkers()
        self.isAlive = True
    def _createAsyncWorkers(self, start=True):
        #    Create the thread pool to handle the api calls.
        for _ in range(0, self._maxAsync):
            thread = ApiAsyncWorker.create(self._q, self, start=start)
            self._workers.append(thread)
        self._logger.debug("Created workers.")
    def __del__(self):
        self.teardown()
    def teardown(self):
        if self is threading.current_thread(): return 
        if not self.isAlive: return
        self.isAlive = False
        #    Unfortunately we require time to stop the workers.
        self._logger.debug("Stopping async workers...")
        for _ in range(0, self._maxAsync):
            self._q.put(STOP())
        time.sleep(1)
        for worker in self._workers:
            worker.stop()
        for worker in self._workers:
            if worker.isAlive(): worker.join()
        self._workers = []
        self._q.close()
        time.sleep(1)
        del self._q
        self._q = None
        self._logger.debug("Stopped async workers (all daemon anyway).")
        #    Now un-bind our data-receive listener from the IPC:
        if self._ipc != None:
            self._ipc.setTransportDataReceiveListener(self)
        self._ipc = None
    def _newIpc(self):
        super(ApiBase, self)._newIpc()
        #    Now bind our data-receive listener to the IPC:
        self._ipc.setTransportDataReceiveListener(self)
    def transportDataReceive(self, tId, data):
        r"""
        @summary: Data is received that is NOT part of an existing transaction.
        We need to decide what to do with it...
        Recursively ask each of our sub-api's to decode the data and handle it.
        If no one can, then return UnsupportedApiError() (unless we consume it with:
        self._ignoreUnhandled==True).
        The handlers will have previously been set by the controlling entity, ie:
        ExecutionOrganiser, Head.
        This method always returns NoResponseRequired, making the call asynchronous.
        """
        myNsPrefix = self._getNamespacePrefix()
        try:
            count = self._dataRxCount.next()
            if isinstance(data, iApiTransportItem):
                ns = data.ns()
                if self._isInMyNamespace(ns):
                    self._findHandler(ns)
                    args = data.args()
                    kwargs = data.kwargs()
                    synchronous = True
                    self._q.put(KNOWN(ns, tId, synchronous, count, args, kwargs))
                    raise NoResponseRequired(ns)
            else:
                #    Inform our listener about the data that we can't handle:
                handler = self.transportDataReceiveListener
                if handler != None:
                    self._q.put(UNKNOWN(tId, data))
                raise NoResponseRequired(myNsPrefix)
        except UnsupportedApiError, e:
            if self._ignoreUnhandled == False:
                #    Propagate exception directly as before.
                raise
            #    Consume silently:
            self._logger.debug("UnsupportedApiError: %(NS)s" % {"NS":e.ns()})
            raise NoResponseRequired(myNsPrefix, e)
Developer: sys-git, Project: epyrpc, Lines of code: 90, Source file: ApiBase.py


Note: The multiprocessing.queues.Queue.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.