

Python Queue.get Method Code Examples

This article collects typical usage examples of the multiprocessing.Queue.get method in Python. If you have been wondering what exactly Queue.get does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the other usage examples of its parent class, multiprocessing.Queue.


The sections below present 15 code examples of Queue.get, ordered by popularity.
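Before the collected examples, here is a minimal self-contained sketch of the method itself: Queue.get() blocks until an item is available, and Queue.get(timeout=...) raises queue.Empty when nothing arrives in time.

from multiprocessing import Process, Queue
from queue import Empty

def produce(q):
    q.put("hello")              # the child process places one item on the queue

if __name__ == "__main__":
    q = Queue()
    p = Process(target=produce, args=(q,))
    p.start()
    print(q.get())              # blocks until the child's item arrives
    p.join()
    try:
        q.get(timeout=0.1)      # queue is now empty
    except Empty:
        print("queue drained")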

Example 1: calculate_quality_list

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
 def calculate_quality_list(self, voi, gantry, couch, calculate_from=0, stepsize=1.0, avoid=[], gradient=True):
     """ TODO: Documentation
     """
     q = Queue(32767)
     process = []
     d = voi.get_voi_cube()
     d.cube = np.array(d.cube, dtype=np.float32)
     voi_cube = DensityProjections(d)
     result = []
     for gantry_angle in gantry:
         p = Process(
             target=self.calculate_angle_quality_thread,
             args=(voi, gantry_angle, couch, calculate_from, stepsize, q, avoid, voi_cube, gradient))
         p.daemon = True  # the daemon flag must be set before start()
         p.start()
         process.append(p)
         if len(process) > 2:
             tmp = q.get()
             result.append(tmp)
             # keep only the processes that are still alive
             process = [proc for proc in process if proc.is_alive()]
     while len(result) < len(gantry) * len(couch):
         tmp = q.get()
         result.append(tmp)
     return result
Author: pytrip, Project: pytrip, Lines: 28, Source: paths.py

Example 2: parallel_precompute

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def parallel_precompute(global_conf_file=None):
    # Define queues
    queueIn = Queue(nb_workers+2)
    queueOut = Queue(nb_workers+8)
    queueProducer = Queue()
    queueFinalizer = Queue()
    queueConsumer = Queue(nb_workers)

    # Start finalizer
    t = Process(target=finalizer, args=(global_conf_file, queueOut, queueFinalizer))
    t.daemon = True
    t.start()
    # Start consumers
    for i in range(nb_workers):
        t = Process(target=consumer, args=(global_conf_file, queueIn, queueOut, queueConsumer))
        t.daemon = True
        t.start()
    # Start producer
    t = Process(target=producer, args=(global_conf_file, queueIn, queueProducer))
    t.daemon = True
    t.start()

    # Wait for everything to be started properly
    producerOK = queueProducer.get()
    finalizerOK = queueFinalizer.get()
    for i in range(nb_workers):
        consumerOK = queueConsumer.get()
    print "[parallel_precompute: log] All workers are ready."
    sys.stdout.flush()
    # Wait for everything to be finished
    finalizerEnded = queueFinalizer.get()
    print "[parallel_precompute: log] Done at {}".format(get_now())
    return
Author: svebk, Project: DeepSentiBank_memex, Lines: 35, Source: precompute_similar_images_parallel_fake.py
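The producer, consumer, and finalizer functions are not reproduced here. The startup handshake they implement is simply that each process puts a token on its dedicated queue once it has finished initializing, and the parent get()s one token per process before continuing. A minimal sketch of that pattern (names are hypothetical, not from the DeepSentiBank_memex source):

from multiprocessing import Process, Queue

def worker(ready_q, work_q):
    # ...slow initialization would happen here...
    ready_q.put("ready")            # signal the parent that startup succeeded
    for item in iter(work_q.get, None):
        pass                        # process items until a None sentinel

if __name__ == "__main__":
    ready_q, work_q = Queue(), Queue()
    procs = [Process(target=worker, args=(ready_q, work_q)) for _ in range(4)]
    for p in procs:
        p.start()
    for _ in procs:
        ready_q.get()               # block until every worker has signalled
    print("all workers are ready")
    for _ in procs:
        work_q.put(None)            # shut the workers down
    for p in procs:
        p.join()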

Example 3: process_labelit

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
    def process_labelit(self, inp=False):
        """
        Initiate Labelit runs.
        """
        if self.verbose:
            self.logger.debug("LabelitPP::process_labelit")

        try:
            queue = Queue()
            params = {}
            params["test"] = self.test
            params["cluster"] = False # self.cluster_use # TODO
            params["verbose"] = self.verbose
            args1 = {}
            if inp:
                args1["input"] = inp
            else:
                args1["input"] = self.images
            args1["output"] = queue
            args1["params"] = params
            args1["logger"] = self.logger

            # Import the RunLabelit class
            agent = load_module(seek_module="rapd_agent_index+strategy",
                                directories=self.agent_directories,
                                logger=self.logger)

            Process(target=agent.RunLabelit, kwargs=args1).start()
            self.labelit_results = queue.get()
            self.labelit_log = queue.get()

        except Exception:
            self.logger.exception("**Error in LabelitPP.process_labelit**")
Author: RAPD, Project: RAPD, Lines: 35, Source: precession.py

Example 4: runBacktracking

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
# Also needed: from threading import Thread; import copy
 def runBacktracking(self, clauseList, partialAssignment,  literalList, queue):
     result = self.partialInterp(clauseList, partialAssignment)
     if result == 'true':
         queue.put(True)
         return
     if result == 'false':
         queue.put(False)
         return
     
     chosenLiteral = self.chooseLiteral(partialAssignment, literalList)
     
     posDict = dict(partialAssignment)
     posDict[chosenLiteral] = 'true'
     negDict = dict(partialAssignment)
     negDict[chosenLiteral] = 'false'
     
     q1 = Queue()
     q2 = Queue()
     thread1 = Thread(target=self.runBacktracking, args=(copy.deepcopy(clauseList), posDict, list(literalList), q1))
     thread1.start()
     thread2 = Thread(target=self.runBacktracking, args=(copy.deepcopy(clauseList), negDict, list(literalList), q2))
     thread2.start()
     thread1.join()
     thread2.join()
     
     if q1.get():
         queue.put(True)
         return
     if q2.get():
         queue.put(True)
         return
     queue.put(False)
     return
Author: GriffinLedingham, Project: AIProject, Lines: 35, Source: DPLL.py

Example 5: parallel_return_function

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def parallel_return_function(function, args_list, threads):
	""" Run function using multiple threads """
	values = []
	processes = []
	queue = Queue()
	for arguments in args_list: # run function for each set of args in args_list
		arguments['queue'] = queue # append queue to list of args
		p = Process(target=function, kwargs=arguments)
		processes.append(p)
		processes[-1].start()
		while len(processes) >= threads: # control number of active processes
			indexes = []
			for index, process in enumerate(processes):
				if process.is_alive(): # keep processes that are still alive
					indexes.append(index)
				else: # return values from processes that are finished
					values.append(queue.get())
			processes = [processes[i] for i in indexes] # update list of processes
	# wait until there are no active processes
	while len(processes) > 0:
		indexes = []
		for index, process in enumerate(processes):
			if process.is_alive(): # keep processes that are still alive
				indexes.append(index)
			else: # return values from processes that are finished
				values.append(queue.get())
		processes = [processes[i] for i in indexes] # update list of processes
	return values
Author: tarah28, Project: MicrobeCensus, Lines: 30, Source: training.py
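The helper above assumes that each target function accepts a queue keyword argument and puts its return value on that queue instead of returning it. A hypothetical target compatible with parallel_return_function might look like this (a sketch, not taken from the MicrobeCensus source):

def square(x, queue):
	""" Example target: compute a value and report it via the queue """
	queue.put(x * x)

# usage sketch: run square over five inputs with at most two live processes
# values = parallel_return_function(square, [{'x': i} for i in range(5)], threads=2)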

Example 6: split_messages

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def split_messages(d, n, nPrime, r, bit,data):
	""" Splits a data set based on the subtraction in montgomery exponentiation."""
	mlist = data
	q_t = Queue()
	q_f = Queue()
	processes = []
	start = 0
	numProcs = 8
	NP = 0
	chunk = max(1, len(mlist)//numProcs)  # avoid a zero chunk when the list is short
	while start < len(mlist):
		p = Process(target=do_sim, args=(q_t, q_f, mlist[start:start+chunk], d, n, nPrime, r, bit))
		NP += 1
		p.start()
		start += chunk
		processes.append(p)
	
	m_true = []
	m_false = []
	for i in range(NP):
		m_true += q_t.get()
		m_false += q_f.get()

	while processes:
		processes.pop().join()
	return (m_true, m_false)
Author: AttackerJin, Project: crypto, Lines: 28, Source: RSAAttack.py

Example 7: test_warc_writer_locking

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def test_warc_writer_locking(tmpdir):
    """Test if WarcWriter is locking WARC files.
    When we don't have the .open suffix, WarcWriter locks the file and the
    external process trying to ``lock_file`` fails (result=0).
    """
    recorder = ProxyingRecorder(None, None, 'sha1', url='http://example.com')
    recorded_url = RecordedUrl(
            url='http://example.com', content_type='text/plain', status=200,
            client_ip='127.0.0.2', request_data=b'abc',
            response_recorder=recorder, remote_ip='127.0.0.3',
            timestamp=datetime.utcnow(), payload_digest=hashlib.sha1())

    dirname = os.path.dirname(str(tmpdir.mkdir('test-warc-writer')))
    wwriter = WarcWriter(Options(
        directory=dirname, no_warc_open_suffix=True))
    wwriter.write_records(recorded_url)
    warcs = [fn for fn in os.listdir(dirname) if fn.endswith('.warc')]
    assert warcs
    target_warc = os.path.join(dirname, warcs[0])
    # launch another process and try to lock WARC file
    q = Queue()
    p = Process(target=lock_file, args=(q, target_warc))
    p.start()
    p.join()
    assert q.get() == 'FAILED TO OBTAIN LOCK'
    wwriter.close()

    # locking must succeed after writer has closed the WARC file.
    p = Process(target=lock_file, args=(q, target_warc))
    p.start()
    p.join()
    assert q.get() == 'OBTAINED LOCK'
Author: internetarchive, Project: warcprox, Lines: 34, Source: test_writer.py
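The lock_file helper lives in the warcprox test suite and is not shown above. On POSIX it could plausibly be written with fcntl as follows (an assumption for illustration, not the project's actual implementation):

import fcntl

def lock_file(q, file_path):
    """Try to take an exclusive non-blocking lock; report the outcome via the queue."""
    try:
        with open(file_path, 'ab') as fh:
            fcntl.lockf(fh, fcntl.LOCK_EX | fcntl.LOCK_NB)
            q.put('OBTAINED LOCK')
    except OSError:
        q.put('FAILED TO OBTAIN LOCK')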

Example 8: TestSetTemp

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
class TestSetTemp(OpengbTestCase): 

    def setUp(self):
        self.to_printer = Queue()
        self.message_handler = server.MessageHandler(to_printer=self.to_printer)

    def test_pass_set_temps_method_to_printer(self):
        """Valid set temperatures result in a 'set_temp' message on the to_printer queue."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200, nozzle2=200)
        self.assertEqual(json.loads(self.to_printer.get())["method"], "set_temp")

    def test_valid_set_temps_passed_to_printer(self):
        """Valid set temperatures are added as a message on the to_printer queue."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200, nozzle2=200)
        self.assertDictEqual(json.loads(self.to_printer.get()), {
            "method": "set_temp",
            "params": {"bed": 100, "nozzle2": 200, "nozzle1": 200}})

    def test_set_bed_temp_defaults_to_none(self):
        """Unspecified bed_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(nozzle1=200, nozzle2=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["bed"], None)
    
    def test_set_nozzle1_temp_defaults_to_none(self):
        """Unspecified nozzle1_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(bed=100, nozzle2=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["nozzle1"], None)

    def test_set_nozzle2_temp_defaults_to_none(self):
        """Unspecified nozzle2_temperature is passed to_the printer as None."""
        mh = self.message_handler.set_temp(bed=100, nozzle1=200)
        self.assertEqual(
            json.loads(self.to_printer.get())["params"]["nozzle2"], None)
Author: yeomps, Project: opengb, Lines: 37, Source: test_server.py

Example 9: test

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def test():
    NUMBER_OF_PROCESSES = 4
    TASK1 = [(mul, (i, 7)) for i in range(20)]
    TASK2 = [(plus, (i, 8)) for i in range(10)]

    task_queue = Queue()
    done_queue = Queue()

    for task in TASK1:
        task_queue.put(task)

    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()

    print('Unordered results:')
    for i in range(len(TASK1)):
        print('\t', done_queue.get())

    for task in TASK2:
        task_queue.put(task)

    for i in range(len(TASK2)):
        print('\t', done_queue.get())

    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
Author: tenghuanhe, Project: PyScript, Lines: 28, Source: 10.py
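The worker, mul, and plus helpers are not shown above; the pattern matches the task-queue example in the multiprocessing documentation, where a worker loops over iter(queue.get, 'STOP'). A sketch consistent with the calls in this test:

def mul(a, b):
    return a * b

def plus(a, b):
    return a + b

def worker(input_queue, output_queue):
    # pull (function, args) tasks until the 'STOP' sentinel arrives
    for func, args in iter(input_queue.get, 'STOP'):
        output_queue.put(func(*args))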

Example 10: driver

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def driver():
    for ioengine in "sync libaio".split():
        for readwrite in "read write randread randwrite rw randrw".split():
            for sync in "0 1".split():
                for direct in "0 1".split():
                    name = "/home/sharath.g/fio2/{}-{}-o_sync{}-o_direct{}".format(ioengine, readwrite, sync, direct)

                    bs = 32*1024
                    time = 60
                    experiments = [hostname]
                    q1 = Queue()
                    q2 = Queue()
                    # cc('> {}'.format(name))
                    while bs <= 1024*1024*16:
                        if direct == "1" and bs % 4096 != 0:
                            bs *= 2
                            continue
                        fio_cmd = "sudo fio --name=global --bs={bs} --ioengine={ioengine} --iodepth=10 --runtime={runtime} --time_based --size=3G --group_reporting --disable_lat=1 --disable_clat=1 --disable_slat=1 --clat_percentiles=0  --filename=/mnt/vdb1/fio/myfile --name={name} --rw={readwrite}  --sync={sync} --direct={direct} --minimal".format(bs=bs, ioengine=ioengine, runtime=time, name=name, readwrite=readwrite, sync=sync, direct=direct)
                        iostat_cmd = "/usr/bin/iostat -k -x -d 1 {} /dev/vdb".format(time)

                        t1 = Process(target=fio, args=(fio_cmd, q1))
                        t2 = Process(target=iostat, args=(iostat_cmd, q2))
                        t1.start()
                        t2.start()
                        t1.join()
                        t2.join()
                        experiments.append([bs, fio_cmd, q1.get(), iostat_cmd, q2.get()])
                        l(json.dumps(experiments, indent=2))
                        bs *= 2
                    with open(name, "w") as output_file:
                        json.dump(experiments, output_file, indent=2)
                    log.debug("======================")
                    log.debug(json.dumps(experiments, indent=2))
Author: agsha, Project: pyfk, Lines: 36, Source: fio_iostat.py

Example 11: test_worker_fills_internal_queue_only_until_maximum_queue_size

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
def test_worker_fills_internal_queue_only_until_maximum_queue_size():
    """
    Test read workers fill internal queue only to maximum size
    """
    conn = boto3.client('sqs', region_name='us-east-1')
    # Set visibility timeout low to improve test speed
    queue_url = conn.create_queue(
        QueueName="tester", Attributes={'VisibilityTimeout': '1'})['QueueUrl']

    message = json.dumps({
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 'Test message',
        },
    })
    for i in range(3):
        conn.send_message(QueueUrl=queue_url, MessageBody=message)

    internal_queue = Queue(maxsize=2)
    worker = ReadWorker(queue_url, internal_queue, BATCHSIZE)
    worker.read_message()

    # The internal queue should only have two messages on it
    internal_queue.get(timeout=1)
    internal_queue.get(timeout=1)

    try:
        internal_queue.get(timeout=1)
    except Empty:
        pass
    else:
        raise AssertionError("The internal queue should be empty")
Author: spulec, Project: PyQS, Lines: 35, Source: test_worker.py

Example 12: create_lists

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
	def create_lists(self, copy_list, new_loc):
		"""Used to create the call lists."""
		
		to_q= Queue()
		resp_q = Queue()

		p = Process(target=self.render_thing, args=(to_q,resp_q,self.initalize,new_loc,self.OFFSET, self.FACTOR, self.use_old))
		p.start()
		
		to_q.put(self.diamonds, False)
		to_q.put(copy_list, False)
		to_q.put(self.OFFSET, False)
		to_q.put(self.rw.convert, False)
		
		print "process....."
		print "get pos_list"
		self.pos_list = resp_q.get()
		self.diamonds = resp_q.get()
		print self.pos_list
		new_dic = resp_q.get()
		dic = self.combine(self.pos_list, new_dic, self.trans.location_var)
		self.trans.location_var = dic
		
		p.join()
		
		print "updating...."
		self.rw.need_lists = True
		self.initalize = False
Author: aapope, Project: TerrainGeneration, Lines: 30, Source: World.py

Example 13: __init__

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
# Also needed: import queue (for queue.Empty)
class chan:
    def __init__(self):
        self._queue = Queue()
        self._closed = False

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except queue.Empty:
            raise StopIteration

    def put(self, x):
        self._queue.put(x)

    def get(self):
        if self._closed:
            return self._queue.get(False)
        else:
            return self._queue.get()

    def close(self):
        self._closed = True
Author: rjeli, Project: gomut, Lines: 27, Source: gomut.py
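A short usage sketch of the channel above. Note that multiprocessing.Queue feeds items through a background thread, so a brief pause before draining with non-blocking gets avoids a spurious queue.Empty:

import time

c = chan()
for i in range(3):
    c.put(i)
time.sleep(0.1)     # let the feeder thread flush the puts
c.close()
for item in c:      # yields 0, 1, 2, then stops when queue.Empty is raised
    print(item)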

Example 14: make_work

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
# Also needed: from multiprocessing import Process; from queue import Empty
def make_work(callback, tasks, limit, ignore_exceptions=True,
              taskq_size=50):
    """
    Run up to "limit" processes, do tasks and yield results.

    :param callback:  the function that will process single task
    :param tasks:  the sequence or iterator or queue of tasks, each task
        in turn is a sequence of arguments; if a task is just a single argument
        it should be wrapped into a list or tuple
    :param limit: the maximum number of processes
    """
    
    # If tasks is a number, convert it to a range of that many tasks
    if isinstance(tasks, int):
        tasks = range(tasks)

    # Ensure that tasks sequence is iterator
    tasks = iter(tasks)    

    taskq = Queue(taskq_size)

    # Here results of task processing will be saved
    resultq = Queue()

    # Prepare and run up to "limit" processes
    processes = []
    for x in range(limit):
        process = Worker(callback, taskq, resultq, ignore_exceptions)
        process.daemon = True
        process.start()
        processes.append(process)

    # Put tasks from tasks iterator to taskq queue
    # until tasks iterator ends
    # Do it in separate process
    def task_processor(task_iter, task_queue, limit):
        try:
            for task in task_iter:
                task_queue.put(task)
        finally:
            for x in range(limit):
                task_queue.put(STOP)

    processor = Process(target=task_processor, args=[tasks, taskq, limit])
    processor.daemon = True
    processor.start()

    while True:
        try:
            yield resultq.get(True, 0.2)
        except Empty:
            pass
        if not any(x.is_alive() for x in processes):
            break

    while True:
        try:
            yield resultq.get(False)
        except Empty:
            break
Author: Scaurus, Project: grab, Lines: 62, Source: pwork.py
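The Worker class and STOP sentinel come from the surrounding grab module and are not shown above. Based on how make_work drives them, they plausibly look like this (an assumption, not the project's actual code):

from multiprocessing import Process

STOP = 'STOP'   # hypothetical sentinel value; the real module defines its own

class Worker(Process):
    def __init__(self, callback, taskq, resultq, ignore_exceptions):
        super().__init__()
        self.callback = callback
        self.taskq = taskq
        self.resultq = resultq
        self.ignore_exceptions = ignore_exceptions

    def run(self):
        # consume tasks until the STOP sentinel, feeding results back
        for task in iter(self.taskq.get, STOP):
            try:
                self.resultq.put(self.callback(*task))
            except Exception:
                if not self.ignore_exceptions:
                    raise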

Example 15: testBackTracking

# Required import: from multiprocessing import Queue [as alias]
# Or: from multiprocessing.Queue import get [as alias]
 def testBackTracking(self):
     testData = [[-1, -2], [1, -2], [-1, -3]]
     testLiterals = [1, 2, 3]
     queue0 = Queue()
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue0)
     if not queue0.get():
         print("Error")
     else:
         print("Passed Test 4")
     
     queue1 = Queue()    
     testData = [[-1, -2], [-1, 2], [1, -2], [2, -3], [1, 3]]
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue1)
     if queue1.get():
         print("Error")
     else:
         print("Passed Test 5")
     
     queue2 = Queue()    
     testData = [[2, 1], [-1], [-2, -3], [3, 1]]
     self.dpllTester.runBacktracking(testData, {}, testLiterals, queue2)
     if queue2.get():
         print("Error")
     else:
         print("Passed Test 6")
Author: GriffinLedingham, Project: AIProject, Lines: 27, Source: Tester.py


Note: The multiprocessing.Queue.get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Consult each project's license before using or redistributing the code; do not reproduce this article without permission.