当前位置: 首页>>代码示例>>Python>>正文


Python Queue.full方法代码示例

本文整理汇总了Python中queue.Queue.full方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.full方法的具体用法?Python Queue.full怎么用?Python Queue.full使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在queue.Queue的用法示例。


在下文中一共展示了Queue.full方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class EntityQueue:
    """Thin wrapper around queue.Queue that also tracks per-producer
    "still enqueuing" flags, so consumers can tell a momentarily drained
    queue apart from one whose producers have all finished."""

    def __init__(self, maxsize=1000):
        self.queue = Queue(maxsize)
        self.enqueuing_flags = {}

    def put(self, item, block=True, timeout=None):
        """Enqueue an item, delegating directly to the wrapped Queue."""
        self.queue.put(item, block, timeout=timeout)

    def get(self, block=True, timeout=None):
        """Dequeue and return the next item from the wrapped Queue."""
        return self.queue.get(block, timeout)

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        # Only report empty once no registered producer is still enqueuing.
        return self.queue.empty() and not self.is_enqueuing()

    def full(self):
        return self.queue.full()

    def add_enqueuing_flag(self, id):
        """Register a producer; it starts out flagged as actively enqueuing."""
        self.enqueuing_flags[id] = True

    def update_enqueuing_flag(self, id, state):
        """Update a producer's flag (False once it has finished)."""
        self.enqueuing_flags[id] = state

    def is_enqueuing(self):
        # True while every registered producer flag is truthy
        # (vacuously True when no producer has been registered yet).
        return all(self.enqueuing_flags.values())
开发者ID:OParl,项目名称:validator,代码行数:35,代码来源:entity_queue.py

示例2: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class FileVideoStream:
	"""Decode frames from a video file on a background daemon thread.

	Frames are buffered in a bounded queue so the consumer can process one
	frame while the next ones are being decoded.
	"""

	def __init__(self, path, queueSize=128):
		# initialize the file video stream along with the boolean
		# used to indicate if the thread should be stopped or not
		self.stream = cv2.VideoCapture(path)
		self.stopped = False

		# initialize the queue used to store frames read from
		# the video file
		self.Q = Queue(maxsize=queueSize)

	def start(self):
		# start a daemon thread to read frames from the file video stream
		t = Thread(target=self.update, args=())
		t.daemon = True
		t.start()
		return self

	def update(self):
		"""Reader loop: decode frames into the queue until EOF or stop()."""
		import time  # local import keeps module-level imports untouched

		while True:
			# if the thread indicator variable is set, stop the thread
			if self.stopped:
				return

			# otherwise, ensure the queue has room in it
			if not self.Q.full():
				# read the next frame from the file
				(grabbed, frame) = self.stream.read()

				# if the `grabbed` boolean is `False`, then we have
				# reached the end of the video file
				if not grabbed:
					self.stop()
					return

				# add the frame to the queue
				self.Q.put(frame)
			else:
				# Bug fix: the original busy-spun here at 100% CPU while
				# the queue was full; yield briefly instead.
				time.sleep(0.01)

	def read(self):
		# return next frame in the queue (blocks if the queue is empty)
		return self.Q.get()

	def more(self):
		# return True if there are still frames in the queue
		return self.Q.qsize() > 0

	def stop(self):
		# indicate that the thread should be stopped
		self.stopped = True
开发者ID:hajungong007,项目名称:imutils,代码行数:53,代码来源:filevideostream.py

示例3: ScrambleGenerator

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class ScrambleGenerator():
    """Generate cube scrambles on a background thread, buffering up to
    *capacity* of them in a bounded queue."""

    def __init__(self, size=3, capacity=10, random_state=True, moves=-1):
        self.cube = Cube(size)
        # clamp capacity to at least 0 (Queue(0) is unbounded)
        self.queue = Queue(max((capacity, 0)))
        self.random_state = random_state
        self.moves = moves
        self.stopped = False
        self.thread = Thread(target=self.enqueue_scramble)
        self.thread.start()

    def enqueue_scramble(self):
        """Fill a given Queue with scramble until it is either full or a given capacity has been reached"""
        import time  # local import keeps module-level imports untouched
        while not self.stopped:
            if not self.queue.full():
                self.queue.put(self.cube.get_scramble(self.random_state, self.moves))
            else:
                # Bug fix: previously busy-spun at 100% CPU while the queue was full
                time.sleep(0.01)

    def __next__(self):
        """Remove and return the next scramble in the queue"""
        return self.queue.get()

    def __enter__(self):
        """(Re)start the scramble generating thread"""
        if self.stopped:
            self.stopped = False
            # Bug fix: a threading.Thread may only be started once; restarting
            # after stop() requires building a fresh thread object.
            self.thread = Thread(target=self.enqueue_scramble)
            self.thread.start()
        return self

    def __exit__(self, type=None, value=None, traceback=None):
        """Stop the scramble generating thread"""
        if not self.stopped:
            self.stopped = True
            self.thread.join()

    def __iter__(self):
        """Make this generator iterable by returning itself"""
        return self

    start, stop = __enter__, __exit__
开发者ID:kroq-gar78,项目名称:termcube,代码行数:40,代码来源:cube.py

示例4: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class ScrambleGenerator:
    """Generate scrambles for *puzzle* on a background thread, buffering up
    to *capacity* of them in a bounded queue."""

    def __init__(self, puzzle=None, random=True, length=None, capacity=10):
        self.puzzle = puzzle if puzzle else Cube(3)
        # queue capacity is clamped to at least 1
        self.queue = Queue(max((capacity, 1)))
        self.random = random
        self.length = length
        self.stopped = False
        self.thread = Thread(target=self.enqueue_scramble)
        self.thread.start()

    def enqueue_scramble(self):
        """Fill a given Queue with scramble until it is either full or a given capacity has been reached"""
        import time  # local import keeps module-level imports untouched
        while not self.stopped:
            if not self.queue.full():
                self.queue.put(self.puzzle.get_scramble(self.random, self.length))
            else:
                # Bug fix: previously busy-spun at 100% CPU while the queue was full
                time.sleep(0.01)

    def __next__(self):
        """Remove and return the next scramble in the queue"""
        return self.queue.get()

    def __enter__(self):
        """(Re)start the scramble generating thread"""
        if self.stopped:
            self.stopped = False
            # Bug fix: a threading.Thread may only be started once; restarting
            # after stop() requires building a fresh thread object.
            self.thread = Thread(target=self.enqueue_scramble)
            self.thread.start()
        return self

    def __exit__(self, type=None, value=None, traceback=None):
        """Stop the scramble generating thread"""
        if not self.stopped:
            self.stopped = True
            self.thread.join()

    def __iter__(self):
        """Make this generator iterable by returning itself"""
        return self

    start, stop = __enter__, __exit__
开发者ID:oddlespuddle,项目名称:termcube,代码行数:40,代码来源:scrambler.py

示例5: Socket

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class Socket(Service):
    """Raw-socket Service: wraps tcp/udp (and, on unix platforms,
    unix-domain) sockets behind a small connection pool."""
    __slots__ = ('connection_pool', 'timeout', 'connection', 'send_and_receive')

    # AF_UNIX only exists on unix-like platforms; falsy elsewhere
    on_unix = getattr(socket, 'AF_UNIX', False)
    Connection = namedtuple('Connection', ('connect_to', 'proto', 'sockopts'))
    # protocol name -> (address family, socket kind)
    protocols = {
        'tcp': (socket.AF_INET, socket.SOCK_STREAM),
        'udp': (socket.AF_INET, socket.SOCK_DGRAM),
    }
    # protocol-name sets used to choose the send/receive strategy
    # and the address normalisation in __init__
    streams = set(('tcp',))
    datagrams = set(('udp',))
    inet = set(('tcp', 'udp',))
    unix = set()

    if on_unix:
        # register the unix-domain variants only where the platform has them
        protocols.update({
            'unix_dgram': (socket.AF_UNIX, socket.SOCK_DGRAM),
            'unix_stream': (socket.AF_UNIX, socket.SOCK_STREAM)
        })
        streams.add('unix_stream')
        datagrams.add('unix_dgram')
        unix.update(('unix_stream', 'unix_dgram'))
    def __init__(self, connect_to, proto, version=None,
                headers=empty.dict, timeout=None, pool=0, raise_on=(500, ), **kwargs):
        """
        :param connect_to: host/port pair for inet protocols (normalised to a
            tuple), or a filesystem path for unix-domain sockets
        :param proto: one of the keys of Socket.protocols ('tcp', 'udp', ...)
        :param timeout: default socket timeout; also forwarded to the Service base
        :param pool: connection pool size (0 falls back to a pool of one)

        NOTE(review): `headers` is accepted but never used or forwarded here —
        presumably consumed elsewhere in the Service hierarchy; confirm.
        """
        super().__init__(timeout=timeout, raise_on=raise_on, version=version, **kwargs)
        connect_to = tuple(connect_to) if proto in Socket.inet else connect_to
        self.timeout = timeout
        self.connection = Socket.Connection(connect_to, proto, set())
        self.connection_pool = Queue(maxsize=pool if pool else 1)

        # streams (tcp/unix_stream) and datagrams (udp/unix_dgram) need
        # different send/receive strategies
        if proto in Socket.streams:
            self.send_and_receive = self._stream_send_and_receive
        else:
            self.send_and_receive = self._dgram_send_and_receive

    def settimeout(self, timeout):
        """Set the default timeout applied to every newly created socket."""
        self.timeout = timeout

    def setsockopt(self, *sockopts):
        """Register socket options to apply to each new connection.

        Accepts either a single list/tuple of (level, option, value) triples,
        or one triple spread across three positional arguments.
        """
        # NOTE: deliberately keeps the original's exact type() check —
        # isinstance() would also match tuple subclasses and change behaviour.
        if type(sockopts[0]) in (list, tuple):
            for level, option, value in sockopts[0]:
                self.connection.sockopts.add((level, option, value))
        else:
            level, option, value = sockopts
            self.connection.sockopts.add((level, option, value))

    def _register_socket(self):
        """Create a socket for the configured protocol, apply any stored
        options, and connect it to the configured endpoint."""
        new_socket = socket.socket(*Socket.protocols[self.connection.proto])
        new_socket.settimeout(self.timeout)

        # Re-apply any socket options registered via setsockopt().
        for level, option, value in self.connection.sockopts:
            new_socket.setsockopt(level, option, value)

        new_socket.connect(self.connection.connect_to)
        return new_socket

    def _stream_send_and_receive(self, _socket, message, *args, **kwargs):
        """TCP/stream request: write the whole message, then read the reply
        until EOF into a seekable BytesIO."""
        response = BytesIO()

        # NOTE(review): encoding is ignored by makefile() in binary mode —
        # kept for byte-compatibility with the original call.
        stream_fd = _socket.makefile(mode='rwb', encoding='utf-8')
        stream_fd.write(message.encode('utf-8'))
        stream_fd.flush()

        for chunk in stream_fd:
            response.write(chunk)
        response.seek(0)

        stream_fd.close()
        return response

    def _dgram_send_and_receive(self, _socket, message, buffer_size=4096, *args):
        """UDP/datagram request: send the message, wrap the single reply
        datagram in a BytesIO."""
        _socket.sendto(message.encode('utf-8'), self.connection.connect_to)
        reply, _address = _socket.recvfrom(buffer_size)
        return BytesIO(reply)

    def request(self, message, timeout=False, *args, **kwargs):
        """Populate connection pool, send message, return BytesIO, and cleanup"""
        if not self.connection_pool.full():
            self.connection_pool.put(self._register_socket())

        _socket = self.connection_pool.get()

        # setting timeout to None enables the socket to block.
        if timeout or timeout is None:
            _socket.settimeout(timeout)

        data = self.send_and_receive(_socket, message, *args, **kwargs)

        if self.connection.proto in Socket.streams:
            _socket.shutdown(socket.SHUT_RDWR)
#.........这里部分代码省略.........
开发者ID:KelseyHale,项目名称:hug,代码行数:103,代码来源:use.py

示例6: Scraper

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]

#.........这里部分代码省略.........
			self.pending_routes = list(routes)
		self.pending_routes = sorted(self.pending_routes, key=lambda _: randint(-10,10))
		logging.info("Randomised routes")

	def rescan(self, date):
		"""Re-run the scrape over previously failed routes.

		Targets routes that were already scraped but returned no search
		result ('ERROR:SoldOutOrInvalid'), resets them, re-randomises the
		order, and starts a new scrape with the given search date.

		:param date: search date string; must match Scraper.DATE_FORMAT
			(the error message suggests MM-DD-YYYY — confirm against the
			class constant)
		"""
		try:
			self.date = datetime.strptime(date, Scraper.DATE_FORMAT)
		except ValueError:
			logging.error("Invalid date specified; should be in the format MM-DD-YYYY")
			return

		print("\n")
		msg = datetime.strftime(self.date, "Rescanning failed routes (SoldOutOrInvalid) for date "+Scraper.DATE_OUTPUT_FORMAT)
		logging.info(msg)
		logging.info("If the scan fails again, the repeat result will still be output (with the new search date)")

		# only routes that were scraped and came back with no search result
		self.pending_routes = [r for r in self.routes if r.scraped and r.no_search_result]
		for r in self.pending_routes:
			r.reset()

		self.randomise_routes()

		logging.info("Set target routes to search to scraped routes with no search result ('ERROR:SoldOutOrInvalid')")
		logging.info("{} routes in total".format(len(self.pending_routes)))
		print("\n")

		self.start()


	def start(self):
		"""Initiate the scrape.

			If the route list is very long, it may take several days or even weeks to complete,
			but can be gracefully interrupted by calling stop() and resumed later.

			The (overall average) scrape rate is estimated at 1000 per hour per account.
			With 5 user accounts, that is about 100,000 per day (running uninterrupted).
		"""
		if not self._init_scrape():
			return

		# sq: pool of login sessions handed out to worker threads
		# rq: work queue of routes still to be scraped
		self.sq = Queue(self.nsessions)
		self.rq = Queue(len(self.pending_routes))
		for route in self.pending_routes:
			self.rq.put(route)

		# one worker per session; daemon so an interrupt can end the process
		for i in range(self.nsessions):
			t = Thread(target=self._do_scrape)
			t.daemon = True
			t.start()

		self.results.begin_write()

		# handing the sessions to the pool is what actually unblocks the workers
		for session in self.sessions:
			self.sq.put(session)

		# poll until the route queue is drained
		while(True):
			if(self.rq.empty()):
				break
			# necessary because rq.join() blocks and prevents interrupt; signal.pause() not an option on Windows
			time.sleep(1)

		self.results.end_write()

	def stop(self):
		"""Stop the scrape (awaiting completion of active searches."""
开发者ID:ak2912,项目名称:Lifemiles,代码行数:70,代码来源:lifemiles.py

示例7: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class SerialPort:
    """Threaded serial-port reader that buffers incoming lines in a bounded queue."""

    __QUEUE_SIZE = 256

    def __init__(self, portNumber, baudRate):
        """
        :param portNumber: 1-based port number (the serial module counts from 0)
        :param baudRate: baud rate used when the port is opened
        """
        # Decrement the port number as the serial module starts
        # count at 0
        self._portNumber = portNumber - 1
        self._baudRate = baudRate

        self._receiveQueue = Queue(SerialPort.__QUEUE_SIZE)

        # Thread, Comm, and thread flag initialisation
        self.reset()

    def openPort(self):
        """Create and open the serial port.

        :raises SerialPortException: if the port is already open
        """
        if self._serialPort is not None and self._serialPort.isOpen():
            raise SerialPortException("Serial Port is already openned.")

        self._serialPort = serial.Serial(self._portNumber, self._baudRate)

    def beginReceiving(self):
        """Start the daemon thread that reads lines into the receive queue.

        :raises SerialPortException: if the port has not been opened
        :raises ThreadException: if a reader thread is already running
        """
        if self._serialPort is None:
            raise SerialPortException("Serial Port hasn't been initialised.")

        if self._communicationThread is not None:
            raise ThreadException("A communication thread is already running.")

        self._communicationThread = Thread(target=self.read)
        self._communicationThread.daemon = True
        self._communicationThread.start()

    def read(self):
        """Reader loop: buffer each incoming line; when the queue is full
        (hasn't been drained in ages) discard the oldest entry first."""
        while not self._killThread:
            if self._receiveQueue.full():
                self._receiveQueue.get()

            self._receiveQueue.put(self._serialPort.readline())

    def readBuffer(self):
        """Drain the receive queue and return the buffered lines as a list."""
        output = []
        while not self._receiveQueue.empty():
            output.append(self._receiveQueue.get())

        return output

    def reset(self):
        """Reset port/thread state; does not close an open port."""
        self._serialPort = None
        self._communicationThread = None
        self._killThread = False

    def closePort(self):
        """Stop the reader thread (if any), close the port, and reset state.

        :raises SerialPortException: if the port is closed or uninitialised
        """
        if self._serialPort is None or not self._serialPort.isOpen():
            raise SerialPortException("Serial Port is either already closed or not initialised.")

        # Bug fix: the original busy-spun re-assigning the flag and called
        # Thread.isAlive(), which was removed in Python 3.9. Signal once,
        # then wait for the thread with is_alive()/join().
        self._killThread = True
        if self._communicationThread is not None and self._communicationThread.is_alive():
            self._communicationThread.join()

        self._serialPort.close()

        self.reset()
开发者ID:bronsonp,项目名称:Project-Mario,代码行数:79,代码来源:PiSerial.py

示例8: __init__

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class LevelCache:
    """Multi-level cache front-end.

    Reads promote a hit into every faster level; writes propagate through
    the levels until a level already holds the value. The durations of the
    most recent operations are tracked in bounded queues.
    """

    def __init__(self, level, cache_arr, no_of_ops_to_track=10):
        self.level = level
        self.caches = cache_arr
        self.last_read = Queue(no_of_ops_to_track)
        self.last_write = Queue(no_of_ops_to_track)

    def write(self, key, value):
        """Write key/value through the levels; returns the accumulated time."""
        elapsed = 0
        found_level = None
        for idx, cache in enumerate(self.caches):
            # probe each level until the key is first seen
            if not found_level:
                probed = cache.read(key)
                elapsed += cache.read_time
                if probed:
                    found_level = idx + 1
            # stop as soon as a level already holds this exact value
            if cache.lookup(key, value):
                break
            cache.write(key, value)
            elapsed += cache.write_time

        self.add_write_time(elapsed)
        return elapsed

    def read(self, key):
        """Read key, promoting a hit to all faster levels.

        Returns the accumulated time, or 0 on a complete miss.
        """
        elapsed = 0
        for idx, cache in enumerate(self.caches):
            hit = cache.read(key)
            elapsed += cache.read_time
            if hit:
                # promote the value into every faster level
                for faster in self.caches[:idx]:
                    faster.write(key, hit)
                    elapsed += faster.write_time
                self.add_read_time(elapsed)
                return elapsed
        return 0

    def stats(self):
        """Print the fill level of every cache."""
        for cache in self.caches:
            print("Usage: {}/{}".format(cache.filled(), cache.capacity))

    def add_write_time(self, time):
        """Record a write duration, evicting the oldest when the window is full."""
        if self.last_write.full():
            self.last_write.get()
        self.last_write.put(time)

    def add_read_time(self, time):
        """Record a read duration, evicting the oldest when the window is full."""
        if self.last_read.full():
            self.last_read.get()
        self.last_read.put(time)

示例9: Build

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]

#.........这里部分代码省略.........
        """
        # Generate a unique project build directory name that will be symlinked to the actual project directory
        # later on when the project gets fetched.
        build_specific_project_directory = self._generate_unique_symlink_path_for_build_repo()

        # Because build_specific_project_directory is entirely internal and generated by ClusterRunner (it is a
        # build-unique generated symlink), we must manually add it to the project_type_params
        project_type_params = self.build_request.build_parameters()
        project_type_params.update({'build_project_directory': build_specific_project_directory})
        self._project_type = util.create_project_type(project_type_params)
        if self._project_type is None:
            raise BuildProjectError('Build failed due to an invalid project type.')

    def prepare(self, subjob_calculator):
        """
        Fetch the project, parse its job config, atomize it into subjobs and
        set up the subjob queues. May be called at most once per build.

        :param subjob_calculator: Used after project fetch to atomize and group subjobs for this build
        :type subjob_calculator: SubjobCalculator
        :raises RuntimeError: if the build has no request or project, if
            prepare() is called a second time, or if clusterrunner.yaml
            fails to parse
        """
        if not isinstance(self.build_request, BuildRequest):
            raise RuntimeError('Build {} has no associated request object.'.format(self._build_id))

        if not isinstance(self.project_type, ProjectType):
            raise RuntimeError('Build {} has no project set.'.format(self._build_id))

        # single-use token guarantees prepare() runs only once per build
        if not self._preparation_coin.spend():
            raise RuntimeError('prepare() was called more than once on build {}.'.format(self._build_id))

        self._state_machine.trigger(BuildEvent.START_PREPARE)
        # WIP(joey): Move the following code into a PREPARING state callback
        #  (so that it won't execute if the build has already been canceled.)

        self._logger.info('Fetching project for build {}.', self._build_id)
        self.project_type.fetch_project()
        self._logger.info('Successfully fetched project for build {}.', self._build_id)

        job_config = self.project_type.job_config()
        if job_config is None:
            raise RuntimeError('Build failed while trying to parse clusterrunner.yaml.')

        subjobs = subjob_calculator.compute_subjobs_for_build(self._build_id, job_config, self.project_type)

        # queues sized exactly to the number of subjobs for this build
        self._unstarted_subjobs = Queue(maxsize=len(subjobs))  # WIP(joey): Move this into BuildScheduler?
        self._finished_subjobs = Queue(maxsize=len(subjobs))  # WIP(joey): Remove this and just record finished count.

        for subjob in subjobs:
            self._all_subjobs_by_id[subjob.subjob_id()] = subjob
            self._unstarted_subjobs.put(subjob)

        self._timing_file_path = self._project_type.timing_file_path(job_config.name)
        app.util.fs.create_dir(self._build_results_dir())
        self._state_machine.trigger(BuildEvent.FINISH_PREPARE)

    def build_id(self):
        """Return the unique identifier of this build.

        :rtype: int
        """
        return self._build_id

    @property
    def build_request(self):
        """The request object this build was created from.

        :rtype: BuildRequest
        """
        return self._build_request

    def all_subjobs(self):
开发者ID:Medium,项目名称:ClusterRunner,代码行数:70,代码来源:build.py

示例10: FluentSender

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class FluentSender(sender.FluentSender):
    """Asynchronous variant of ``sender.FluentSender``.

    Serialized events are appended to an in-process queue and shipped by a
    dedicated background thread, so producers never block on the network.
    The queue either blocks producers when full (default) or acts as a
    circular buffer that discards the oldest pending event
    (``queue_circular=True``).
    """

    def __init__(self,
                 tag,
                 host='localhost',
                 port=24224,
                 bufmax=1 * 1024 * 1024,
                 timeout=3.0,
                 verbose=False,
                 buffer_overflow_handler=None,
                 nanosecond_precision=False,
                 msgpack_kwargs=None,
                 queue_maxsize=DEFAULT_QUEUE_MAXSIZE,
                 queue_circular=DEFAULT_QUEUE_CIRCULAR,
                 **kwargs):
        """
        :param queue_maxsize: maximum number of pending events in the queue
        :param queue_circular: if True, a full queue discards the oldest
            pending event instead of blocking the producer
        :param kwargs: This kwargs argument is not used in __init__. This will be removed in the next major version.
        """
        super(FluentSender, self).__init__(tag=tag, host=host, port=port, bufmax=bufmax, timeout=timeout,
                                           verbose=verbose, buffer_overflow_handler=buffer_overflow_handler,
                                           nanosecond_precision=nanosecond_precision,
                                           msgpack_kwargs=msgpack_kwargs,
                                           **kwargs)
        self._queue_maxsize = queue_maxsize
        self._queue_circular = queue_circular

        self._thread_guard = threading.Event()  # This ensures visibility across all variables
        self._closed = False

        self._queue = Queue(maxsize=queue_maxsize)
        self._send_thread = threading.Thread(target=self._send_loop,
                                             name="AsyncFluentSender %d" % id(self))
        # daemon: the sender thread must not keep the interpreter alive
        self._send_thread.daemon = True
        self._send_thread.start()

    def close(self, flush=True):
        """Shut down the sender and join the worker thread.

        With flush=True (default) pending events are sent before the worker
        exits; with flush=False the queue is discarded first.
        """
        with self.lock:
            if self._closed:
                return
            self._closed = True
            if not flush:
                # drop everything still queued without sending it
                while True:
                    try:
                        self._queue.get(block=False)
                    except Empty:
                        break
            # sentinel tells _send_loop to exit once it reaches this entry
            self._queue.put(_TOMBSTONE)
            self._send_thread.join()

    @property
    def queue_maxsize(self):
        # maximum number of pending events held by the internal queue
        return self._queue_maxsize

    @property
    def queue_blocking(self):
        # blocking and circular modes are mutually exclusive
        return not self._queue_circular

    @property
    def queue_circular(self):
        return self._queue_circular

    def _send(self, bytes_):
        """Enqueue one serialized event; returns False once the sender is closed."""
        with self.lock:
            if self._closed:
                return False
            if self._queue_circular and self._queue.full():
                # discard oldest
                try:
                    self._queue.get(block=False)
                except Empty:  # pragma: no cover
                    pass
            try:
                # blocking put only in non-circular mode; in circular mode room
                # was just made above, so a non-blocking put cannot fail
                self._queue.put(bytes_, block=(not self._queue_circular))
            except Full:    # pragma: no cover
                return False    # this actually can't happen

            return True

    def _send_loop(self):
        """Worker-thread loop: forward queued events until the tombstone arrives."""
        send_internal = super(FluentSender, self)._send_internal

        try:
            while True:
                bytes_ = self._queue.get(block=True)
                if bytes_ is _TOMBSTONE:
                    break

                send_internal(bytes_)
        finally:
            self._close()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
开发者ID:fluent,项目名称:fluent-logger-python,代码行数:94,代码来源:asyncsender.py

示例11: LockingDeque

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
class LockingDeque():
  '''merge of some deque and Queue object features

  This provides the locking interface of the Queue and the pop, popleft,
  append, appendleft and clear features of the deque.

  Example:
    import time

    ld = LockingDeque()
    def example_task(ld):
      ld.wait() # => task will stop until an item is appended
      print(ld.pop())

    thread = Thread(target=example_task, args=(ld,))
    thread.start()   # thread started and locked
    time.sleep(10)
    ld.append("bob") # thread prints "bob"
                     # thread finished
    time.sleep(0.1)
    assert(thread.is_alive() == False)

  '''

  def __init__(self, *args, **kwargs):
    self.deque         = deque(maxlen=HsmWithQueues.QUEUE_SIZE)
    self.locking_queue = Queue(maxsize=HsmWithQueues.QUEUE_SIZE)

  def get(self, block=True, timeout=None):
    '''block on the locking queue, popleft from deque'''
    return self.locking_queue.get(block, timeout)

  def wait(self, block=True, timeout=None):
    '''wait for an append/appendleft event'''
    return self.get(block, timeout)

  def popleft(self):
    return self.deque.popleft()

  def pop(self):
    return self.deque.pop()

  def append(self, item):
    if self.locking_queue.full() is False:
      # we don't care about storing items in the locking_queue, our information
      # is in the deque, the locking_queue provides the 'get' unlocking feature
      self.locking_queue.put("ready")
      self.deque.append(item)
    else:
      self.deque.rotate(1)
      self.deque.append(item)

    # keep the unlock-token count in step with the deque contents
    if self.locking_queue.qsize() < len(self.deque):
      while self.locking_queue.qsize() != len(self.deque):
        self.locking_queue.put("ready")

  def appendleft(self, item):
    if self.locking_queue.full() is False:
      # we don't care about storing items in the locking_queue, our information
      # is in the deque, the locking_queue provides the 'get' locking feature
      self.locking_queue.put("ready")
      self.deque.appendleft(item)

    if self.locking_queue.qsize() < len(self.deque):
      while self.locking_queue.qsize() != len(self.deque):
        self.locking_queue.put("ready")

  def clear(self):
    '''Empty the deque and drain every pending unlock token.

    Bug fix: the original used a bare ``except:`` -- which also swallows
    SystemExit/KeyboardInterrupt -- and called task_done() unconditionally
    inside that handler, which raises ValueError when there is no
    unfinished task to mark done.
    '''
    from queue import Empty  # local import keeps module-level imports untouched
    self.deque.clear()
    while True:
      try:
        self.locking_queue.get_nowait()
      except Empty:
        break
    try:
      # preserve the original intent: mark a task done so join() can unblock
      self.locking_queue.task_done()
    except ValueError:
      pass

  def task_done(self):
    self.locking_queue.task_done()  # so that join can work

  def qsize(self):
    return self.locking_queue.qsize()

  def __len__(self):
    return len(self.deque)

  def len(self):
    return len(self.deque)

示例12: int

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
from queue import Queue

# FIFO page-replacement simulation: for each request sequence, count how
# many page loads (faults) occur given a fixed number of frames.
try:
    while True:
        frame_count = int(input(''))
        request_count = int(input(''))
        frames = Queue(maxsize=frame_count)   # pages in FIFO load order
        resident = set()                      # pages currently in a frame
        faults = 0
        for _ in range(request_count):
            page = int(input(''))
            if page in resident:
                continue
            if frames.full():
                # evict the page loaded earliest
                evicted = frames.get()
                resident.remove(evicted)
            frames.put(page)
            resident.add(page)
            faults += 1
        print(faults)
except EOFError:
    pass
开发者ID:JJWSSS,项目名称:DailyPractice,代码行数:28,代码来源:页面调度算法.py

示例13: Build

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]

#.........这里部分代码省略.........
        with the unique workspace directory path for this build.

        :raises BuildProjectError when failed to instantiate project type
        """
        # Generate a unique project build directory name that will be symlinked to the actual project directory
        # later on when the project gets fetched.
        build_specific_project_directory = self._generate_unique_symlink_path_for_build_repo()

        # Because build_specific_project_directory is entirely internal and generated by ClusterRunner (it is a
        # build-unique generated symlink), we must manually add it to the project_type_params
        project_type_params = self.build_request.build_parameters()
        project_type_params.update({'build_project_directory': build_specific_project_directory})
        self._project_type = util.create_project_type(project_type_params)

        if self._project_type is None:
            raise BuildProjectError('Build failed due to an invalid project type.')

    def prepare(self, subjob_calculator):
        """
        Fetch the project, parse its job config, atomize it into subjobs and
        set up the subjob queues. May be called at most once per build.

        :param subjob_calculator: Used after project fetch to atomize and group subjobs for this build
        :type subjob_calculator: SubjobCalculator
        :raises RuntimeError: if the build has no request or project, if
            prepare() is called a second time, or if clusterrunner.yaml
            fails to parse
        """
        if not isinstance(self.build_request, BuildRequest):
            raise RuntimeError('Build {} has no associated request object.'.format(self._build_id))

        if not isinstance(self.project_type, ProjectType):
            raise RuntimeError('Build {} has no project set.'.format(self._build_id))

        # single-use token guarantees prepare() runs only once per build
        if not self._preparation_coin.spend():
            raise RuntimeError('prepare() was called more than once on build {}.'.format(self._build_id))

        self._logger.info('Fetching project for build {}.', self._build_id)
        self.project_type.fetch_project()
        self._logger.info('Successfully fetched project for build {}.', self._build_id)

        job_config = self.project_type.job_config()
        if job_config is None:
            raise RuntimeError('Build failed while trying to parse clusterrunner.yaml.')

        subjobs = subjob_calculator.compute_subjobs_for_build(self._build_id, job_config, self.project_type)

        # queues sized exactly to the number of subjobs for this build
        self._unstarted_subjobs = Queue(maxsize=len(subjobs))
        self._finished_subjobs = Queue(maxsize=len(subjobs))

        for subjob in subjobs:
            self._all_subjobs_by_id[subjob.subjob_id()] = subjob
            self._unstarted_subjobs.put(subjob)

        self._timing_file_path = self._project_type.timing_file_path(job_config.name)
        self.is_prepared = True
        self._record_state_timestamp(BuildStatus.PREPARED)

    def build_id(self):
        """Return the unique identifier of this build.

        :rtype: int
        """
        return self._build_id

    def all_subjobs(self):
        """
        Returns a list of subjobs for this build
        :rtype: list[Subjob]
        """
        # list() replaces the original pass-through comprehension: same
        # result, clearer intent, and a C-speed copy.
        return list(self._all_subjobs_by_id.values())

    def subjob(self, subjob_id):
开发者ID:sagannotcarl,项目名称:ClusterRunner,代码行数:70,代码来源:build.py

示例14: test_mutate_input_with_threads

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
def test_mutate_input_with_threads():
    """Input is mutable when using the threading backend"""
    shared_queue = Queue(maxsize=5)
    put_tasks = [delayed(shared_queue.put, check_pickle=False)(1)
                 for _ in range(5)]
    Parallel(n_jobs=2, backend="threading")(put_tasks)
    nose.tools.assert_true(shared_queue.full())
开发者ID:MMX13,项目名称:postergen,代码行数:7,代码来源:test_parallel.py

示例15: Queue

# 需要导入模块: from queue import Queue [as 别名]
# 或者: from queue.Queue import full [as 别名]
# importing only the Queue from the queue module
from queue import Queue

# create an unbounded Queue instance
q = Queue()
print("Initially the size of queue is %s" % q.qsize())
print("Checking whether queue is empty or not. Empty?? = %s" % q.empty())

# enqueue five items one at a time
for letter in ('A', 'B', 'C', 'D', 'E'):
    q.put(letter)

print("After adding some value, size of queue is %s" % q.qsize())
print("Checking whether queue is full or not. Full?? = %s" % q.full())

# dequeue every buffered item
while not q.empty():
    print("Retrieved = ", end=' ')
    print(q.get())

# after retrieving, check the size of the object
print("Size of queue is = %s " % q.qsize())
开发者ID:1989tianlong,项目名称:journaldev,代码行数:27,代码来源:queue_full_empty.py


注:本文中的queue.Queue.full方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。