

Python Queue.join_thread Method Code Examples

This article collects typical usage examples of the multiprocessing.Queue.join_thread method in Python. If you are wondering how exactly to use Queue.join_thread, how it works, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of multiprocessing.Queue, the class this method belongs to.


The text below presents 15 code examples of the Queue.join_thread method, sorted by popularity by default.
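For orientation before the examples: join_thread() is normally called right after close(), once a process has finished putting items on a queue, and it blocks until the queue's background feeder thread has flushed all buffered items to the underlying pipe. Below is a minimal sketch of that pattern; the produce/consume function names and the payload are purely illustrative and not taken from any of the projects listed here.

from multiprocessing import Process, Queue

def consume(q):
    # drain the queue until the 'STOP' sentinel arrives
    for item in iter(q.get, 'STOP'):
        print(item)

def produce(q):
    for i in range(10):
        q.put(i)
    q.put('STOP')
    q.close()          # this process will put nothing more on the queue
    q.join_thread()    # wait for the feeder thread to flush its buffer to the pipe

if __name__ == '__main__':
    q = Queue()
    consumer = Process(target=consume, args=(q,))
    consumer.start()
    produce(q)
    consumer.join()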

Example 1: _workerQpushTimer

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class _workerQpushTimer():
    def __init__(self):
        self.syncPeriod = 2
        self.timer = None
        self.Qinit()

    def Qinit(self):
        self.syncTmpQ = Queue()

    # flush remaining items in the queue, then close it and join_thread
    def Qflush(self):
        while True:
            try:
                self.syncTmpQ.get(True, comm.FLUSH_TIMEOUT)
            except Empty:
                break
        self.syncTmpQ.close()
        self.syncTmpQ.join_thread()

    def enableTimer(self, workerPool):
        self.timer = Timer(self.syncPeriod, self.pushToWorkerQ, [workerPool])
        self.timer.start()

    def disableTimer(self):
        if self.timer is not None:
            self.timer.cancel()

    # executed periodically to sync items from the main-process queue into each worker's queue
    def pushToWorkerQ(self, workerPool):
        while not comm.done.value:
            try:
                item = self.syncTmpQ.get_nowait()
                for w in workerPool:
                    w.queue.put_nowait(item)
            except Empty:
                break
        if not comm.done.value:
            self.enableTimer(workerPool)
Author: mingchen-chung | Project: JobSeeking | Lines: 35 | Source file: client.py

Example 2: test_transaction_large

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
    def test_transaction_large(self) -> None:
        queue = Queue()  # type: Queue[str]
        msg = 't' * 100001  # longer than the max read size of 100_000
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
Author: chadrik | Project: mypy | Lines: 14 | Source file: testipc.py

Example 3: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
def main():
    # build_proxy()
    queue = Queue(2048)
    for i in range(128):
        Process(target=retrieve_from_queue, args=(queue,)).start()

    with open('samples.log') as f:
        process(f, queue)

    queue.close()
    queue.join_thread()
    # NOTE: `pool` is not defined in this snippet (leftover from a removed
    # multiprocessing.Pool), so its shutdown calls are left commented out.
    # pool.close()
    # pool.join()
Author: 0312birdzhang | Project: crawl_imgs | Lines: 15 | Source file: crawl_imgs.py

Example 4: Layer2Worker

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class Layer2Worker():
    def __init__(self):
        self.queue = Queue()

    # close and join_thread queue
    def Qflush(self):
        while True:
            time.sleep(comm.FLUSH_TIMEOUT)
            if self.queue.qsize() == 0:
                break
        self.queue.close()
        self.queue.join_thread()

    def saveProcess(self, process):
        self.process = process
Author: mingchen-chung | Project: JobSeeking | Lines: 15 | Source file: client.py

Example 5: test_queue

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
def test_queue():
    q = Queue()
    #procLst = [Process(target=p, args=(q,)) for p in [prod1, prod2, consum1, consum2]]
    pLst = [Process(target=p, args=(q,)) for p in [prod1, prod2]]
    cLst = [Process(target=p, args=(q,)) for p in [consum1, consum2]]
    procLst = pLst + cLst
    for pp in procLst:
        pp.start()
#    for pp in pLst:
#        pp.join()
#    q.put('STOP')
    q.close()
#    print 'Queue is closed'
    q.join_thread()
Author: JasonYang007 | Project: python | Lines: 16 | Source file: myGUI.py

Example 6: Director

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class Director(object):
    def __init__(self, producer, consumer):
        self.producer = producer
        self.consumer = consumer
        self.queue = Queue()
        self.prod_proc = Process(target = self.produce)
        self.prod_proc.daemon = True
        self.lock = Lock()
        self.done = Value('b')
        self.done.value = 0

    def start(self):
        self.prod_proc.start()

    def step(self):
        self.lock.acquire()
        done = (self.done.value != 0)
        self.lock.release()
        if done:
            raise Done
        try:
            data = self.queue.get(block = True, timeout = 1.0)
            self.consumer.consume(data)
        except Empty:
            pass

    def stop(self):
        self.prod_proc.join()

    def run(self):
        self.start()
        while True:
            try:
                self.step()
            except Done:
                break
        self.stop()
        
    def produce(self):
        try:
            while True:
                data = self.producer.produce()
                self.queue.put(data)
        except:
            self.lock.acquire()
            self.done.value = 1
            self.lock.release()
            self.queue.close()
            self.queue.join_thread()
Author: supersasha | Project: polygon-py | Lines: 51 | Source file: mpdata.py

Example 7: test_connect_twice

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
    def test_connect_twice(self) -> None:
        queue = Queue()  # type: Queue[str]
        msg = 'this is a test message'
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'')  # don't let the server hang up yet, we want to connect again.

        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b'test')
        queue.close()
        queue.join_thread()
        p.join()
        assert p.exitcode == 0
Author: chadrik | Project: mypy | Lines: 19 | Source file: testipc.py

Example 8: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
def main():
    #build_proxy()
    #pool = Pool(processes=512)
    #m = Manager()
    queue = Queue(2048)
    pool = []
    for i in range(128):
        p = Process(target=retrieve_from_queue, args=(queue,))
        p.start()
        pool.append(p)  # keep a handle to each worker so it can be joined later
    exist_file = 0
    socket.setdefaulttimeout(3)
    with open('samples.log') as f:
        for index, line in enumerate(f):
            if index % 1000 == 0:
                print index, line
            try:
                args = line.split()
                if len(args) == 2:
                    count, url = args
                else:
                    count = args[0]
                    url = ''.join(args[1:])
            except Exception as e:
                print 'exception:', str(e), '|', line
                continue

            # print 'main:', count, url
            fname = urlparse(url).path.split('/')[-1]
            path = './imgs/'+str(index)+'.'+count+'.'+fname

            '''
            result = pool.apply_async(
                    retrieve,
                    args=(url, path, queue),
                    callback=callback
            )
            '''
            queue.put((url, path))
        print 'apply async done'
    queue.close()
    queue.join_thread()
    # pool.close()
    for p in pool:  # `pool` is a plain list of worker processes, so join each one
        p.join()
    for e in exceptions:
        print e
Author: Eric-- | Project: cowry | Lines: 47 | Source file: thread-pool-apply-async.py

Example 9: ProcessExecutor

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class ProcessExecutor(object):
    def __init__(self):
        self.processes = []
        self.q = Queue()

    def wait_until_finished(self):
        for _process in self.processes:
            _process.join()
        self.q.close()
        self.q.join_thread()

    def execute(self, fn, *args, **kwargs):
        promise = Promise()

        self.q.put([promise, fn, args, kwargs], False)
        _process = Process(target=queue_process, args=(self.q,))  # args must be a tuple, hence the trailing comma
        _process.start()
        self.processes.append(_process)
        return promise
Author: graphql-python | Project: graphql-core | Lines: 21 | Source file: process.py

Example 10: ProcessHandler

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class ProcessHandler(object):
	def __init__(self, functions):
		self.processes = []
		self.num_of_processes = len(functions)
		self.functions = functions
		self.event_q = Queue()

	def start(self):
		# create/start processes and event queue
		for i in range(self.num_of_processes):
			function = self.functions[i]
			name = self.functions[i].__name__
			print function
			self.processes.append(Process(target=function, name=name, args=(self.event_q,)))
			
			self.processes[i].start()
			while not self.processes[i].is_alive():
				time.sleep(0.01)
			print self.processes[i]

	def close(self):
		for i in range(self.num_of_processes):
			self.event_q.close()
			self.event_q.join_thread()
			self.processes[i].join()
			print self.processes[i]

	def watchDog(self):
		for i in range(self.num_of_processes):
			if not self.processes[i].is_alive():
				print self.processes[i]
				function = self.functions[i]
				name = self.functions[i].__name__
				q = self.event_q
				self.processes[i] = Process(target=function, name=name, args=(q,))
				self.processes[i].start()
				while not self.processes[i].is_alive():
					time.sleep(0.1)
				print self.processes[i]
Author: avninja | Project: utils | Lines: 41 | Source file: processhandler.py

Example 11: glFlush

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
        glFlush()
        pygame.display.flip()
        try:
            result = queue_locations.get(False, 0.1)
            locations.extend(result)
            print "MOTHER > There are %d locations saved ... :)" % (len(locations))
            print "MOTHER > last location:\n", pformat(locations[-1])
            # current_location = len(locations)-1
            no_data = 0
        except QEmpty:
            if not e_startup.is_set():
                p_gps_upd.start()
                e_startup.set()
            no_data += r_chrono
            pass

        chrono_tmp = chrono
        chrono = pygame.time.get_ticks() / 1000.0

    print "MOTHER : closing queue locations"
    queue_locations.close()
    print "MOTHER : waiting end of queue"
    queue_locations.join_thread()
    print "MOTHER : set stopped event"
    e_stopped.set()
    print "MOTHER : waiting for GPSUPDATER to end"
    p_gps_upd.join()
    print "MOTHER : quit PyGame"
    pygame.quit()
Author: kiniou | Project: roam-gps-reader | Lines: 31 | Source file: roam-gps-reader.py

Example 12: main

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--server', default=DEFAULT_SERVER,
                        help=u'Elasticsearch hostname or IP (default {0})'.format(DEFAULT_SERVER))
    parser.add_argument('--port', default=DEFAULT_PORT,
                        help=u'Elasticsearch port (default {0})'.format(DEFAULT_PORT))
    parser.add_argument('--scanfile', help=u'Path to umich scan file you are ingesting. '
                                           u'Please make sure to decompress it')
    parser.add_argument('--initial', help=u'If this is the first file you are importing please use this flag',
                        action='store_true')
    args = parser.parse_args(argv[1:])

    if args.scanfile is None:
        logger.error("Please include a scanfile")
        sys.exit(1)

    workers = cpu_count()
    process_hosts_queue = Queue(maxsize=20000)
    process_certs_queue = Queue(maxsize=20000)

    for w in xrange(workers/2):
        #  Establish elasticsearch connection for each process
        es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
        p = Process(target=process_hosts, args=(process_hosts_queue, es, args.initial))
        p.daemon = True
        p.start()

    for w in xrange(workers/2):
        #  Establish elasticsearch connection for each process
        es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
        p = Process(target=process_scan_certs, args=(process_certs_queue, es))
        p.daemon = True
        p.start()

    logger.warning("Starting processing of {file} at {date}".format(file=args.scanfile, date=datetime.now()))

    # This is the bottleneck of the process, but it works for now
    parse_scanfile(args.scanfile, process_hosts_queue, process_certs_queue)

    #  Once all the json lines have been put onto the queue. Add DONE so the queue workers know when to quit.
    for w in xrange(workers):
        process_hosts_queue.put("DONE")
        process_certs_queue.put("DONE")

    #  Close out the queue we are done
    process_hosts_queue.close()
    process_hosts_queue.join_thread()
    process_certs_queue.close()
    process_certs_queue.join_thread()

    #  this is kinda dirty but without looking up everything at insert time (slow) I don't know of a better way to do
    #  this based on the number of documents we will have
    refresh_es = Elasticsearch([{u'host': args.server, u'port': args.port}], timeout=30)
    # construct an elasticsearch query where the filter is looking for any entry that is missing the field first_seen
    q = {'size': 500, "query": {"match_all": {}}, "filter": {"missing": {"field": "first_seen"}}}

    new_updates = refresh_es.search(index='passive-ssl-hosts-umich', body=q)

    logger.warning("Numer of hosts to update is {count}".format(count=new_updates['hits']['total']))

    # Scan across all the documents missing the first_seen field and bulk update them
    missing_first_seen = scan(refresh_es, query=q, scroll='30m', index='passive-ssl-hosts-umich')

    bulk_miss = []
    for miss in missing_first_seen:
        last_seen = miss['_source']['last_seen']
        first_seen = last_seen
        action = {"_op_type": "update", "_index": "passive-ssl-hosts-umich", "_type": "host", "_id": miss['_id'],
                  "doc": {'first_seen': first_seen}}
        bulk_miss.append(action)
        if len(bulk_miss) == 500:
            bulk(refresh_es, bulk_miss)
            bulk_miss = []

    #  Flush the remaining actions (fewer than 500) left over after the loop has ended
    bulk(refresh_es, bulk_miss)
    logger.warning("{file} import finished at {date}".format(file=args.scanfile, date=datetime.now()))

    # Now we should optimize each index to max num segments of 1 to help with searching/sizing and just over all
    # es happiness
    logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-hosts-umich', date=datetime.now()))
    refresh_es.indices.optimize(index='passive-ssl-hosts-umich', max_num_segments=1, request_timeout=7500)
    logger.warning("Optimizing index: {index} at {date}".format(index='passive-ssl-certs-umich', date=datetime.now()))
    refresh_es.indices.optimize(index='passive-ssl-certs-umich', max_num_segments=1, request_timeout=7500)
Author: fork42541 | Project: scansio-sonar-es | Lines: 86 | Source file: umich_daily.py

Example 13: anything

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class Worker:
    """This class is used for poller and reactionner to work.
    The worker is a process launch by theses process and read Message in a Queue
    (self.s) (slave)
    They launch the Check and then send the result in the Queue self.m (master)
    they can die if they do not do anything (param timeout)

    """

    _id = 0  # None
    _process = None
    _mortal = None
    _idletime = None
    _timeout = None
    _control_q = None

    def __init__(self, _id, slave_q, returns_queue, processes_by_worker, mortal=True, timeout=300,
                 max_plugins_output_length=8192, target=None, loaded_into='unknown',
                 http_daemon=None):
        self._id = self.__class__._id
        self.__class__._id += 1

        self._mortal = mortal
        self._idletime = 0
        self._timeout = timeout
        self.slave_q = None
        self.processes_by_worker = processes_by_worker
        self._control_q = Queue()  # Private Control queue for the Worker
        # By default, take our own code
        if target is None:
            target = self.work
        self._process = Process(target=target, args=(slave_q, returns_queue, self._control_q))
        self.returns_queue = returns_queue
        self.max_plugins_output_length = max_plugins_output_length
        self.i_am_dying = False
        # Keep a trace of where the worker was launched from (poller or reactionner?)
        self.loaded_into = loaded_into
        if os.name != 'nt':
            self.http_daemon = http_daemon
        else:  # windows forker do not like pickle http/lock
            self.http_daemon = None

    def is_mortal(self):
        """
        Accessor to _mortal attribute

        :return: A boolean indicating if the worker is mortal or not.
        :rtype: bool
        """
        return self._mortal

    def start(self):
        """
        Start the worker. Wrapper for calling start method of the process attribute

        :return: None
        """
        self._process.start()

    def terminate(self):
        """
        Wrapper for calling terminate method of the process attribute
        Also close queues (input and output) and terminate queues thread

        :return: None
        """
        # We can just terminate process, not threads
        self._process.terminate()
        # If we are using a Manager()-based queue,
        # these functions may not exist
        if hasattr(self._control_q, 'close'):
            self._control_q.close()
            self._control_q.join_thread()
        if hasattr(self.slave_q, 'close'):
            self.slave_q.close()
            self.slave_q.join_thread()

    def join(self, timeout=None):
        """
         Wrapper for calling join method of the process attribute

        :param timeout: time to wait for the process to terminate
        :type timeout: int
        :return: None
        """
        self._process.join(timeout)

    def is_alive(self):
        """
        Wrapper for calling is_alive method of the process attribute

        :return: A boolean indicating if the process is alive
        :rtype: bool
        """
        return self._process.is_alive()

    def is_killable(self):
        """
        Determine whether a process is killable :

#.........the rest of the code is omitted here.........
Author: gst | Project: alignak | Lines: 103 | Source file: worker.py

Example 14: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]

#.........the rest of the code is omitted here.........
                return
            print >>sys.stderr, 'record', self.record
            dutset = {'last_finish_time':time()}
            if not self.record:
                return
            upd = {'end_time': time(), 'modification_time':time()}

            if value: # i.e. , if test failed:
                upd['failure'] = repr(value)
                upd['exception'] = value.__class__.__name__
                if not isinstance(value, KeyboardInterrupt):
                    print 'HEADLINE: exception', upd['exception'], value
                    for clause in format_exception(_type, value, traceback):
                        for line in clause.split('\n'):
                            print 'CRASH:', line
                else:
                    upd['infrastructure_problem'] = True
                    upd['whiteboard'] = '[infrastructure] test interrupted'
                if self.reinstall_on_failure:
                    dutset['test_failed'] = True
                    tnext = time() + 300
                    print 'INFO: test failed, so will reinstall machine at', \
                        asctime(localtime(tnext))

            if self.failed: #some test suite failed
                upd['failure'] = 'test failed'

            self.mdb.results.update({'_id':self.result_id}, {'$set':upd})
            classify = process_result(self.mdb.results.find_one({'_id':self.result_id}))
            print 'HEADLINE:', classify, self.full_description()

            get_track().updates.save({'result_id':self.result_id,
                                      'action':'experiment finished'})

            if self.dut_id:
                self.mdb.duts.update({'_id':self.dut_id}, 
                                     {'$unset': {'control_pid':1, 'result_id':1,
                                                 'control_command_line':1},
                                      '$set': dutset})
            if self.build:
                recount(self.build)
            if classify == 'infrastructure_problems':
                pass
            else:
                col = 'green' if classify == 'passes' else 'red'
        finally:
            if self.record_queue:
                self.record_queue.put('finish')
                self.record_queue.close()
                self.record_queue.join_thread()
            if self.stream_process:
                self.stream_process.join()
            if self.stdout_filter:
                self.stdout_filter.del_callback(self)
            
    def full_description(self):
        """Work out a full test description"""
        des = describe_dut(self.dut) if self.dut else ''
        if self.build:
            des += ' with ' + self.build
        if self.result_id:
            des += ' BVT result ID ' + str(self.result_id)
        return (self.description if self.description 
                else 'unknown test') + ' on ' +  des
    def set_description(self, description):
        """Set description (sometimes this is not known at the start
        of the test"""
        self.description = description
        if not self.record:
            return
        self.mdb.results.update({'_id':self.result_id}, 
                                 {'$set':{'test_case':description}})
    def set_build(self, build):
        """Set build (sometimes this is not known at the start of the test"""
        self.build = build
        if not self.record:
            return
        self.mdb.results.update({'_id':self.result_id}, 
                                {'$set':{'build':build}})


    # Update the time, result, and failure reason for a step in a test suite.
    def update_step(self, suite, i, result, reason=''):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'step'+str(i):result, 'step%s-reason'%str(i):reason,
                                                'step%s-end'%str(i):time()}})

    # Update the end time, result, and failure reason for a test suite
    def update_suite(self, suite, result, reason=''):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'result':result, 'finish_time':time(), 'reason':reason}})

    # Allocate the initial doc in the suiteresults collection in mongo for this run.
    def gen_suite_log(self, suite, num_steps):
        self.mdb.suiteresults.save({'result_id':self.result_id, 'suite':suite, 'steps':num_steps, 'start_time':time()})   
    
    # Indicate the start time for one step in a test suite
    def step_start(self, suite, i):
        self.mdb.suiteresults.update({'result_id':self.result_id, 'suite':suite},
                                     {'$set':{'step%s-start'%str(i):time()}})
Author: OpenXT | Project: bvt | Lines: 104 | Source file: record_test.py

Example 15: buffer

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import join_thread [as alias]
class gpib:
    visaLib = visa.VisaLibrary()
    delay = 0  # command transmit delay
    values_format = pyvisa.highlevel.single | pyvisa.highlevel.big_endian  # this is how a keithley 2400 does binary transfers
    chunk_size = 102400  # need a slightly bigger transfer buffer than the default to transfer a full sample buffer (2500 samples) from a keithley 2400 in one shot
    def __init__(self,locationString=None,timeout=30,useQueues=False):
        self.locationString = locationString
        self.timeout = timeout
        self.useQueues = useQueues

        if self.locationString is not None:
            if self.useQueues: #queue mode
                #build the queues
                self.task_queue = Queue()
                self.done_queue = Queue()
                #kickoff the worker process
                self.p = Process(target=self._worker, args=(self.task_queue, self.done_queue))
                self.p.start()
            else:#non-queue mode
                self.v = visa.instrument(self.locationString,timeout=self.timeout,chunk_size=self.chunk_size,delay=self.delay,values_format=self.values_format)

    def __del__(self):
        if self.useQueues:
            if self.p.is_alive():
                self.task_queue.put('STOP')
            self.p.join()
            self.task_queue.close()
            self.done_queue.close()
            self.task_queue.join_thread()
            self.done_queue.join_thread()
        else:
            if hasattr(self,'v'):
                self.v.close()

    def _worker(self, inputQ, outputQ):
        #local, threadsafe instrument object created here
        v = visa.instrument(self.locationString,timeout=self.timeout,chunk_size=self.chunk_size,delay=self.delay,values_format=self.values_format)
        for func, args in iter(inputQ.get, 'STOP'):#queue processing going on here
            try:
                toCall = getattr(v,func)
                ret = toCall(*args)#visa function call occurs here
            except:
                ret = None
            if ret: #don't put None outputs into output queue
                outputQ.put(ret)
        print "queue worker closed properly"
        v.close()
        inputQ.close()
        outputQ.close()

    #make queue'd and non-queued writes look the same to the client
    def write(self,string):
        if self.useQueues:
            self.task_queue.put(('write',(string,)))
        else:
            self.v.write(string)

    #controls remote enable line
    def controlRen(self,mode):
        visa.Gpib()._vpp43.gpib_control_ren(mode)

    def clearInterface(self):
        self.visaLib.gpib_send_ifc()

    def findInstruments(self):
        return visa.get_instruments_list()    
Author: adharsh27r | Project: i-v-vs-time-taker | Lines: 68 | Source file: gpib.py


Note: The multiprocessing.Queue.join_thread examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.