

Python Queue.task_done Method Code Examples

This article collects typical usage examples of the Python method six.moves.queue.Queue.task_done, gathered from open-source projects. If you have been wondering what Queue.task_done does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, six.moves.queue.Queue.


The following presents 15 code examples of the Queue.task_done method, sorted by popularity by default.
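
Before diving into the examples, it may help to see the basic task_done() contract in isolation: every item taken with get() must eventually be acknowledged with task_done(), which is what allows join() to unblock once all queued work is finished. The worker and the items below are purely illustrative:

import threading
from six.moves.queue import Queue

def worker(q):
    while True:
        item = q.get()        # blocks until an item arrives
        if item is None:      # sentinel value: time to exit
            q.task_done()
            break
        print('processing', item)
        q.task_done()         # acknowledge that this item is fully handled

q = Queue()
t = threading.Thread(target=worker, args=(q,))
t.daemon = True
t.start()

for i in range(3):
    q.put(i)
q.put(None)  # ask the worker to exit
q.join()     # returns once task_done() has balanced every put()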

Example 1: FrameSaver

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class FrameSaver( threading.Thread ):
	def __init__( self ):
		threading.Thread.__init__( self )
		self.daemon = True
		self.name = 'FrameSaver'
		self.reset()
	
	def reset( self ):
		self.queue = Queue()
	
	def run( self ):
		self.reset()
		while 1:
			message = self.queue.get()
			if   message[0] == 'Save':
				cmd, fileName, bib, t, frame = message
				#sys.stderr.write( 'save' )
				PhotoFinish.SavePhoto( fileName, bib, t, frame )
				self.queue.task_done()
			elif message[0] == 'Terminate':
				self.queue.task_done()
				self.reset()
				break
	
	def stop( self ):
		self.queue.put( ['Terminate'] )
		self.join()
	
	def save( self, fileName, bib, t, frame ):
		self.queue.put( ['Save', fileName, bib, t, frame] )
Developer: esitarski, Project: CrossMgr, Lines: 32, Source: VideoBuffer.py
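
A hypothetical usage of FrameSaver (the file name, bib number, time, and frame value are made-up placeholders; PhotoFinish.SavePhoto is provided by the surrounding CrossMgr code):

# Illustrative sketch only; 'frame_data' stands in for a captured video frame.
saver = FrameSaver()
saver.start()
saver.save( 'finish_001.jpg', bib=17, t=12.345, frame=frame_data )
saver.stop()  # enqueues 'Terminate' and joins the thread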

Example 2: TagGroup

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class TagGroup( object ):
	'''
		Process groups of tag reads and return the best time estimated using quadratic regression.
		Stray reads are also detected if there is no quiet period for the tag.
		The first read time of each stray read is returned.
	'''
	def __init__( self ):
		self.q = Queue()
		self.tagInfo = {}
		
	def add( self, antenna, tag, t, db ):
		self.q.put((antenna, tag, t, db))

	def flush( self ):
		# Process all waiting reads.
		while 1:
			try:
				antenna, tag, t, db = self.q.get(False)
			except Empty:
				break
			try:
				self.tagInfo[tag].add( antenna, t, db )
			except KeyError:
				self.tagInfo[tag] = TagGroupEntry( antenna, t, db )
			self.q.task_done()
			
	def getReadsStrays( self, tNow=None, method=QuadraticRegressionMethod, antennaChoice=MostReadsChoice, removeOutliers=True ):
		'''
			Returns two lists:
				reads = [(tag1, t1, sampleSize1, antennaID1), (tag2, t2, sampleSize2, antennaID2), ...]
				strays = [(tagA, tFirstReadA), (tagB, tFirstReadB), ...]
				
			Each stray will be reported as a read the first time it is detected.
		'''
		self.flush()
		
		trNow = datetimeToTr( tNow or datetime.now() )
		reads, strays = [], []
		toDelete = []
		
		for tag, tge in six.iteritems(self.tagInfo):
			if trNow - tge.lastReadMax >= tQuiet:				# Tag has left read range.
				if not tge.isStray:
					t, sampleSize, antennaID = tge.getBestEstimate(method, antennaChoice, removeOutliers)
					reads.append( (tag, t, sampleSize, antennaID) )
				toDelete.append( tag )
			elif tge.lastReadMax - tge.firstReadMin >= tStray:	# This is a stray.
				t = trToDatetime( tge.firstReadMin )
				if not tge.isStray:
					tge.setStray()
					reads.append( (tag, t, 1, 0) )				# Report stray first read time.
				strays.append( (tag, t) )
				
		for tag in toDelete:
			del self.tagInfo[tag]
		
		reads.sort( key=operator.itemgetter(1,0))
		strays.sort( key=operator.itemgetter(1,0) )
		return reads, strays
Developer: esitarski, Project: CrossMgr, Lines: 61, Source: TagGroup.py
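
A hypothetical read-processing flow (the antenna, tag, timestamp, and signal values are made up; the regression helpers and module constants such as tQuiet and datetimeToTr come from the surrounding TagGroup.py):

from datetime import datetime

tg = TagGroup()
tg.add( 1, '0501DEADBEEF', datetime.now(), -62 )   # antenna, tag, read time, dB
tg.add( 1, '0501DEADBEEF', datetime.now(), -60 )
reads, strays = tg.getReadsStrays()
for tag, t, sampleSize, antennaID in reads:
    print( tag, t, sampleSize, antennaID )

Note that a tag is only reported once it has been quiet for tQuiet seconds, so in a real run getReadsStrays() is polled periodically rather than called once.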

Example 3: SubscribeListener

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class SubscribeListener(SubscribeCallback):
    def __init__(self):
        self.connected = False
        self.connected_event = Event()
        self.disconnected_event = Event()
        self.presence_queue = Queue()
        self.message_queue = Queue()

    def status(self, pubnub, status):
        if utils.is_subscribed_event(status) and not self.connected_event.is_set():
            self.connected_event.set()
        elif utils.is_unsubscribed_event(status) and not self.disconnected_event.is_set():
            self.disconnected_event.set()

    def message(self, pubnub, message):
        self.message_queue.put(message)

    def presence(self, pubnub, presence):
        self.presence_queue.put(presence)

    def wait_for_connect(self):
        if not self.connected_event.is_set():
            self.connected_event.wait()
        else:
            raise Exception("the instance is already connected")

    def wait_for_disconnect(self):
        if not self.disconnected_event.is_set():
            self.disconnected_event.wait()
        else:
            raise Exception("the instance is already disconnected")

    def wait_for_message_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.message_queue.get()
            self.message_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue

    def wait_for_presence_on(self, *channel_names):
        channel_names = list(channel_names)
        while True:
            env = self.presence_queue.get()
            self.presence_queue.task_done()
            if env.channel in channel_names:
                return env
            else:
                continue
Developer: pubnub, Project: python, Lines: 53, Source: pubnub.py
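
A hypothetical way to attach this listener with the PubNub Python SDK (the client construction and the channel name are assumptions, not part of the snippet above):

# Assumes 'pubnub' is an already-configured PubNub client instance.
listener = SubscribeListener()
pubnub.add_listener(listener)
pubnub.subscribe().channels('demo').execute()
listener.wait_for_connect()
envelope = listener.wait_for_message_on('demo')
print(envelope.message)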

Example 4: _handle_messages_threaded

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
    def _handle_messages_threaded(self):
        # Handles messages in a threaded fashion.
        queue = Queue()

        def producer_loop():
            # Read messages from file, and queue them for execution.
            for msg in self._read_next_message():
                queue.put(msg)
                # Check if an error occurred.
                if self._done:
                    break
            # Wait until the queue empties out to signal completion from the
            # producer's side.
            if not self._done:
                queue.join()
                self._done = True

        producer = Thread(name="Producer", target=producer_loop)
        # @note Previously, when trying to do `queue.clear()` in the consumer,
        # and `queue.join()` in the producer, there would be intermittent
        # deadlocks. By demoting the producer to a daemon, I (eric.c) have not
        # yet encountered a deadlock.
        producer.daemon = True
        producer.start()

        # Consume.
        # TODO(eric.cousineau): Trying to quit via Ctrl+C is awkward (but kinda
        # works). Is there a way to have `plt.pause` handle Ctrl+C differently?
        try:
            pause = self.scope_globals['pause']
            while not self._done:
                # Process messages.
                while not queue.empty():
                    msg = queue.get()
                    queue.task_done()
                    self._execute_message(msg)
                # Spin busy for a bit, let matplotlib (or whatever) flush its
                # event queue.
                pause(0.01)
        except KeyboardInterrupt:
            # User pressed Ctrl+C.
            self._done = True
            print("Quitting")
        except Exception as e:
            # We encountered an error, and must stop.
            self._done = True
            self._had_error = True
            traceback.print_exc(file=sys.stderr)
            sys.stderr.write("  Stopping (--stop_on_error)\n")
Developer: mposa, Project: drake, Lines: 51, Source: call_python_client.py
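
The producer/consumer shape above (a daemon producer that calls queue.join() while the consumer drains with get()/task_done()) reduces to a small self-contained sketch; all names here are illustrative stand-ins:

import threading
import time
from six.moves.queue import Queue

queue = Queue()
finished = threading.Event()

def producer_loop():
    for msg in range(10):          # stand-in for _read_next_message()
        queue.put(msg)
    queue.join()                   # wait for the consumer to task_done() everything
    finished.set()

producer = threading.Thread(name="Producer", target=producer_loop)
producer.daemon = True             # daemon status sidesteps the deadlock noted above
producer.start()

while not finished.is_set():
    while not queue.empty():
        msg = queue.get()
        queue.task_done()
        print('executing', msg)    # stand-in for _execute_message(msg)
    time.sleep(0.01)               # stand-in for the matplotlib pause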

Example 5: Fluentd

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class Fluentd(object):
    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
            # Send events after every request finishes
            app.after_request(self.send_events)

        # Unbounded queue for sent events
        self.queue = Queue()
        tag_label = app.config.get('EVENT_TAG_PREFIX', 'flask.fluentd')
        self._sender = sender.FluentSender(tag_label)

    def init_app(self, app):
        # Use the newstyle teardown_appcontext if it's available,
        # otherwise fall back to the request context
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.send_events)
        else:
            app.teardown_request(self.send_events)

    def event(self, pair):
        tag, evt = pair
        self.queue.put((tag, evt))

    def send_events(self, exception):
        """
        Makes a best-effort to send all the events that it pushed during a
        request but capable of missing some
        """
        pumping = True
        while pumping:
            try:
                tag, evt = self.queue.get_nowait()
                self._sender.emit(tag, evt)
                self.queue.task_done()
            except Empty:
                pumping = False
            except Exception as e:
                # This is bad but it's worse to foul the request because
                # of a logging issue
                logging.exception(e)
                self.queue.task_done()

        return exception
Developer: thread, Project: flask-fluentd, Lines: 47, Source: flask_fluentd.py
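
send_events() is a standard non-blocking drain: get_nowait() pulls items until queue.Empty is raised, and task_done() is called even when emitting fails, so a later join() cannot hang on a lost acknowledgement. The same shape, condensed into an illustrative helper:

from six.moves.queue import Empty

def drain(queue, handle, log_error):
    # Pull every queued item without blocking; a failed item is logged
    # but still acknowledged so that queue.join() cannot hang on it.
    while True:
        try:
            item = queue.get_nowait()
        except Empty:
            return
        try:
            handle(item)
        except Exception as e:
            log_error(e)
        queue.task_done()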

Example 6: Events

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        # http://stackoverflow.com/a/20598791
        self.daemon = False
        self.callback = callback
        self.name = 'EVENT-QUEUE'
        self.stop = threading.Event()

    def put(self, event_type):
        self.queue.put(event_type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    event_type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(event_type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    event_type = None

            # exiting thread
            self.stop.clear()
        except Exception as error:
            log.error(u'Exception generated in thread %s: %s',
                      self.name, ex(error))
            log.debug(repr(traceback.format_exc()))

    # System Events
    class SystemEvent(Event):
        RESTART = 'RESTART'
        SHUTDOWN = 'SHUTDOWN'
Developer: pymedusa, Project: SickRage, Lines: 44, Source: event_queue.py
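
A hypothetical usage of this event queue (the callback is a stand-in; event types are plain strings such as the SystemEvent constants):

def on_event(event_type):
    print('handling', event_type)

events = Events(on_event)
events.start()
events.put(Events.SystemEvent.RESTART)
events.stop.set()  # run() exits after its current 1-second get() times out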

Example 7: Events

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class Events(threading.Thread):
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        self.daemon = True
        self.callback = callback
        self.name = "EVENT-QUEUE"
        self.stop = threading.Event()

    def put(self, type):
        self.queue.put(type)

    def run(self):
        """
        Actually runs the thread to process events
        """
        try:
            while not self.stop.is_set():
                try:
                    # get event type
                    type = self.queue.get(True, 1)

                    # perform callback if we got an event type
                    self.callback(type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    type = None

            # exiting thread
            self.stop.clear()
        except Exception as e:
            logger.log("Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR)
            logger.log(repr(traceback.format_exc()), logger.DEBUG)

    # System Events
    class SystemEvent(Event):
        RESTART = "RESTART"
        SHUTDOWN = "SHUTDOWN"
Developer: KraXed112, Project: SickRage, Lines: 42, Source: event_queue.py

Example 8: OvnDbNotifyHandler

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class OvnDbNotifyHandler(object):

    STOP_EVENT = ("STOP", None, None, None)

    def __init__(self, driver):
        self.driver = driver
        self.__watched_events = set()
        self.__lock = threading.Lock()
        self.notifications = Queue()
        self.notify_thread = greenthread.spawn_n(self.notify_loop)
        atexit.register(self.shutdown)

    def matching_events(self, event, row, updates):
        with self.__lock:
            return tuple(t for t in self.__watched_events
                         if t.matches(event, row, updates))

    def watch_event(self, event):
        with self.__lock:
            self.__watched_events.add(event)

    def watch_events(self, events):
        with self.__lock:
            for event in events:
                self.__watched_events.add(event)

    def unwatch_event(self, event):
        with self.__lock:
            try:
                self.__watched_events.remove(event)
            except KeyError:
                # For ONETIME events, they should normally clear on their own
                pass

    def unwatch_events(self, events):
        with self.__lock:
            for event in events:
                try:
                    self.__watched_events.remove(event)
                except KeyError:
                    # For ONETIME events, they should normally clear on
                    # their own
                    pass

    def shutdown(self):
        self.notifications.put(OvnDbNotifyHandler.STOP_EVENT)

    def notify_loop(self):
        while True:
            try:
                match, event, row, updates = self.notifications.get()
                if (not isinstance(match, row_event.RowEvent) and
                        (match, event, row, updates) == (
                            OvnDbNotifyHandler.STOP_EVENT)):
                    self.notifications.task_done()
                    break
                match.run(event, row, updates)
                if match.ONETIME:
                    self.unwatch_event(match)
                self.notifications.task_done()
            except Exception:
                # If any unexpected exception happens we don't want the
                # notify_loop to exit.
                LOG.exception(_LE('Unexpected exception in notify_loop'))

    def notify(self, event, row, updates=None):
        matching = self.matching_events(
            event, row, updates)
        for match in matching:
            self.notifications.put((match, event, row, updates))
Developer: bali2016, Project: networking-ovn, Lines: 72, Source: ovsdb_monitor.py
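
A hypothetical flow (driver, row, and the event object are stand-ins; concrete event classes subclass row_event.RowEvent in the surrounding module):

handler = OvnDbNotifyHandler(driver)
handler.watch_event(my_event)        # my_event: a RowEvent subclass instance
handler.notify('create', row, None)  # queues (match, event, row, updates) for notify_loop
handler.shutdown()                   # queues STOP_EVENT; notify_loop drains and exits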

Example 9: Impinj

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]

#......... some code omitted here .........
			)
		)
		
		self.messageQ.put( (
			'Impinj',
			'{} {}. {} - {}{}{}'.format(
					'QuadReg' if quadReg else 'FirstRead',
					self.tagCount,
					tagID,
					discoveryTime.strftime('%H:%M:%S.%f'),
					' samples={}'.format(sampleSize) if sampleSize > 1 else '',
					' antennaID={}'.format(antennaID) if antennaID else '',
			),
			self.antennaReadCount,
			)
		)
		Bell()
		return True
	
	def handleTagGroup( self ):
		if not self.tagGroup:
			return
		reads, strays = self.tagGroup.getReadsStrays( method=ProcessingMethod, antennaChoice=AntennaChoice )
		for tagID, discoveryTime, sampleSize, antennaID in reads:
			self.reportTag( tagID, discoveryTime, sampleSize, antennaID, True )
			
		self.strayQ.put( ('strays', strays) )
		self.tagGroupTimer = threading.Timer( 1.0, self.handleTagGroup )
		self.tagGroupTimer.start()
	
	def handleLogFile( self ):
		while 1:
			msg = self.logQ.get()
			self.logQ.task_done()
			
			if msg[0] == 'shutdown':
				return
			try:
				pf = io.open( self.fname, 'a' )
			except:
				continue
			
			pf.write( msg[1] if msg[1].endswith('\n') else msg[1] + '\n' )
			while 1:
				try:
					msg = self.logQ.get( False )
				except Empty:
					break
				self.logQ.task_done()
				
				if msg[0] == 'shutdown':
					return
				pf.write( msg[1] if msg[1].endswith('\n') else msg[1] + '\n' )
			pf.close()
			time.sleep( 0.1 )
	
	def runServer( self ):
		self.messageQ.put( ('BackupFile', self.fname) )
		
		self.messageQ.put( ('Impinj', '*****************************************' ) )
		self.messageQ.put( ('Impinj', 'Reader Server Started: ({}:{})'.format(self.impinjHost, self.impinjPort) ) )
			
		# Create an old default time for last tag read.
		tOld = getTimeNow() - datetime.timedelta( days = 100 )
		utcfromtimestamp = datetime.datetime.utcfromtimestamp
		
Developer: esitarski, Project: CrossMgr, Lines: 69, Source: Impinj.py

Example 10: TCPClientManager

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class TCPClientManager(object):
    """A Client for the 'Push' feature in Device Cloud"""

    def __init__(self, conn, secure=True, ca_certs=None, workers=1):
        """
        Arbitrator for multiple TCP Client Sessions

        :param conn: The :class:`devicecloud.DeviceCloudConnection` to use
        :param secure: Whether or not to create a secure SSL wrapped session.
        :param ca_certs: Path to a file containing Certificates.
            If not provided, the devicecloud.crt file provided with the module will
            be used.  In most cases, the devicecloud.crt file should be acceptable.
        :param workers: Number of workers threads to process callback calls.
        """
        self._conn = conn
        self._secure = secure
        self._ca_certs = ca_certs

        # A dict mapping Sockets to their PushSessions
        self.sessions = {}
        # IO thread is used monitor sockets and consume data.
        self._io_thread = None
        # Writer thread is used to send data on sockets.
        self._writer_thread = None
        # Write queue is used to queue up data to write to sockets.
        self._write_queue = Queue()
        # A pool that monitors callback events and invokes them.
        self._callback_pool = CallbackWorkerPool(self._write_queue, size=workers)

        self.closed = False
        self.log = logging.getLogger(__name__)

    @property
    def hostname(self):
        return self._conn.hostname

    @property
    def username(self):
        return self._conn.username

    @property
    def password(self):
        return self._conn.password

    def _restart_session(self, session):
        """Restarts and re-establishes session

        :param session: The session to restart
        """
        # remove old session key, if socket is None, that means the
        # session was closed by user and there is no need to restart.
        if session.socket is not None:
            self.log.info("Attempting restart session for Monitor Id %s."
                          % session.monitor_id)
            del self.sessions[session.socket.fileno()]
            session.stop()
            session.start()
            self.sessions[session.socket.fileno()] = session

    def _writer(self):
        """
        Indefinitely checks the writer queue for data to write
        to socket.
        """
        while not self.closed:
            try:
                sock, data = self._write_queue.get(timeout=0.1)
                self._write_queue.task_done()
                sock.send(data)
            except Empty:
                pass  # nothing to write after timeout
            except socket.error as err:
                if err.errno == errno.EBADF:
                    self._clean_dead_sessions()

    def _clean_dead_sessions(self):
        """
        Traverses sessions to determine if any sockets
        were removed (indicates a stopped session).
        In these cases, remove the session.
        """
        for sck in list(self.sessions.keys()):
            session = self.sessions[sck]
            if session.socket is None:
                del self.sessions[sck]

    def _select(self):
        """
        While the client is not marked as closed, performs a socket select
        on all PushSession sockets.  If any data is received, parses and
        forwards it on to the callback function.  If the callback is
        successful, a PublishMessageReceived message is sent.
        """
        try:
            while not self.closed:
                try:
                    inputready = select.select(self.sessions.keys(), [], [], 0.1)[0]
                    for sock in inputready:
                        session = self.sessions[sock]
                        sck = session.socket
#......... some code omitted here .........
Developer: digidotcom, Project: python-devicecloud, Lines: 103, Source: monitor_tcp.py

Example 11: CallbackWorkerPool

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class CallbackWorkerPool(object):
    """
    A Worker Pool implementation that creates a number of predefined threads
    used for invoking Session callbacks.
    """

    def __init__(self, write_queue=None, size=1):
        """
        Creates a Callback Worker Pool for use in invoking Session Callbacks
        when data is received by a push client.

        :param write_queue: Queue used for queueing up socket write events
            for when a payload message is received and processed.
        :param size: The number of worker threads to invoke callbacks.
        """
        # Used to queue up PublishMessageReceived events to be sent back to
        # the iDigi server.
        self._write_queue = write_queue
        # Used to queue up sessions and data to callback with.
        self._queue = Queue(size)
        # Number of workers to create.
        self.size = size
        self.log = logging.getLogger('{}.callback_worker_pool'.format(__name__))

        for _ in range(size):
            worker = Thread(target=self._consume_queue)
            worker.daemon = True
            worker.start()

    def _consume_queue(self):
        """
        Continually blocks until data is on the internal queue, then calls
        the session's registered callback and sends a PublishMessageReceived
        if callback returned True.
        """
        while True:
            session, block_id, raw_data = self._queue.get()
            data = json.loads(raw_data.decode('utf-8'))  # decode as JSON
            try:
                result = session.callback(data)
                if result is None:
                    self.log.warn("Callback %r returned None, expected boolean.  Messages "
                                  "are not marked as received unless True is returned", session.callback)
                elif result:
                    # Send a Successful PublishMessageReceived with the
                    # block id sent in request
                    if self._write_queue is not None:
                        response_message = struct.pack('!HHH',
                                                       PUBLISH_MESSAGE_RECEIVED,
                                                       block_id, 200)
                        self._write_queue.put((session.socket, response_message))
            except Exception as exception:
                self.log.exception(exception)

            self._queue.task_done()

    def queue_callback(self, session, block_id, data):
        """
        Queues up a callback event to occur for a session with the given
        payload data.  Will block if the queue is full.

        :param session: the session with a defined callback function to call.
        :param block_id: the block_id of the message received.
        :param data: the data payload of the message received.
        """
        self._queue.put((session, block_id, data))
Developer: digidotcom, Project: python-devicecloud, Lines: 68, Source: monitor_tcp.py
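
A hypothetical way to wire the pool into a push client (the session object is a stand-in; in this module it normally comes from TCPClientManager, and it must expose the .callback and .socket attributes the pool uses):

# Illustrative only; 'session' is assumed to exist.
write_queue = Queue()
pool = CallbackWorkerPool(write_queue, size=4)

# When a payload message arrives for a session:
pool.queue_callback(session, block_id=1, data=b'{"value": 42}')

Note that the internal queue is bounded at size entries, so queue_callback() blocks once that many callbacks are already pending, exactly as its docstring warns.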

Example 12: LSFBatchSystem

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]

#......... some code omitted here .........
            del self.lsfJobIDs[jobID]

        toKill = set(jobIDs)
        while len(toKill) > 0:
            for jobID in list(toKill):
                if getjobexitcode(self.lsfJobIDs[jobID]) is not None:
                    toKill.remove(jobID)

            if len(toKill) > 0:
                logger.warn("Tried to kill some jobs, but something happened and they are still going, "
                             "so I'll try again")
                time.sleep(5)

    def getIssuedBatchJobIDs(self):
        """A list of jobs (as jobIDs) currently issued (may be running, or maybe 
        just waiting).
        """
        return self.currentjobs

    def getRunningBatchJobIDs(self):
        """Gets a map of jobs (as jobIDs) currently running (not just waiting) 
        and a how long they have been running for (in seconds).
        """
        times = {}
        currentjobs = set()
        for x in self.getIssuedBatchJobIDs():
            if x in self.lsfJobIDs:
                currentjobs.add(self.lsfJobIDs[x])
            else:
                #not yet started
                pass
        process = subprocess.Popen(["bjobs"], stdout = subprocess.PIPE)

        for curline in process.stdout:
            items = curline.strip().split()
            if (len(items) > 9 and (items[0]) in currentjobs) and items[2] == 'RUN':
                jobstart = "/".join(items[7:9]) + '/' + str(date.today().year)
                jobstart = jobstart + ' ' + items[9]
                jobstart = time.mktime(time.strptime(jobstart, "%b/%d/%Y %H:%M"))
                times[self.jobIDs[(items[0])]] = time.time() - jobstart
        return times

    def getUpdatedBatchJob(self, maxWait):
        try:
            sgeJobID, retcode = self.updatedJobsQueue.get(timeout=maxWait)
            self.updatedJobsQueue.task_done()
            jobID, retcode = (self.jobIDs[sgeJobID], retcode)
            self.currentjobs -= {self.jobIDs[sgeJobID]}
        except Empty:
            pass
        else:
            return jobID, retcode, None

    def getWaitDuration(self):
        """We give parasol a second to catch its breath (in seconds)
        """
        #return 0.0
        return 15

    @classmethod
    def getRescueBatchJobFrequency(cls):
        """Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
        making it expensive. We allow this every 10 minutes..
        """
        return 1800

    def obtainSystemConstants(self):
        p = subprocess.Popen(["lshosts"], stdout = subprocess.PIPE, stderr = subprocess.STDOUT)

        line = p.stdout.readline()
        items = line.strip().split()
        num_columns = len(items)
        cpu_index = None
        mem_index = None
        for i in range(num_columns):
                if items[i] == 'ncpus':
                        cpu_index = i
                elif items[i] == 'maxmem':
                        mem_index = i

        if cpu_index is None or mem_index is None:
                raise RuntimeError("lshosts command does not return ncpus or maxmem columns")

        p.stdout.readline()

        self.maxCPU = 0
        self.maxMEM = MemoryString("0")
        for line in p.stdout:
                items = line.strip().split()
                if len(items) < num_columns:
                        raise RuntimeError("lshosts output has a varying number of columns")
                if items[cpu_index] != '-' and int(items[cpu_index]) > self.maxCPU:
                        self.maxCPU = int(items[cpu_index])
                if items[mem_index] != '-' and MemoryString(items[mem_index]) > self.maxMEM:
                        self.maxMEM = MemoryString(items[mem_index])

        if self.maxCPU == 0 or self.maxMEM == MemoryString("0"):
                raise RuntimeError("lshosts returns null ncpus or maxmem info")
        logger.debug("Got the maxCPU: %s and maxMEM: %s" % (self.maxCPU, self.maxMEM))
Developer: chapmanb, Project: toil, Lines: 104, Source: lsf.py

Example 13: Rigger

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class Rigger(object):
    """ A Rigger event framework instance.

    The Rigger object holds all configuration and instances of plugins. By default Rigger accepts
    a configuration file name to parse, though it is perfectly acceptable to pass the configuration
    into the ``self.config`` attribute.

    Args:
        config_file: A configuration file holding all of Riggers base and plugin configuration.
    """
    def __init__(self, config_file):
        self.gdl = threading.Lock()
        self.pre_callbacks = defaultdict(dict)
        self.post_callbacks = defaultdict(dict)
        self.plugins = {}
        self.config_file = config_file
        self.squash_exceptions = False
        self.initialized = False
        self._task_list = {}
        self._queue_lock = threading.Lock()
        self._global_queue = Queue()
        self._background_queue = Queue()
        self._server_shutdown = False
        self._zmq_event_handler_shutdown = False
        self._global_queue_shutdown = False
        self._background_queue_shutdown = False

        globt = threading.Thread(target=self.process_queue, name="global_queue_processor")
        globt.start()
        bgt = threading.Thread(
            target=self.process_background_queue, name="background_queue_processor")
        bgt.start()

    def process_queue(self):
        """
        The ``process_queue`` thread manages taking events on and off of the global queue.
        Both TCP and in-object fire_hooks place events onto the global_queue and these are both
        handled by the same handler called ``process_hook``. If there is an exception during
        processing, the exception is printed and execution continues.
        """
        while not self._global_queue_shutdown:
            while not self._global_queue.empty():
                with self._queue_lock:
                    tid = self._global_queue.get()
                    obj = self._task_list[tid].json_dict
                    self._task_list[tid].status = Task.RUNNING
                try:
                    loc, glo = self.process_hook(obj['hook_name'], **obj['data'])
                    combined_dict = {}
                    combined_dict.update(glo)
                    combined_dict.update(loc)
                    self._task_list[tid].output = combined_dict
                except Exception as e:
                    self.log_message(e)
                with self._queue_lock:
                    self._global_queue.task_done()
                    self._task_list[tid].status = Task.FINISHED
                if not self._task_list[tid].json_dict.get('grab_result', None):
                    del self._task_list[tid]
            time.sleep(0.1)

    def process_background_queue(self):
        """
        The ``process_background_queue`` manages the hooks which have been backgrounded. In this
        respect the tasks that are completed are not required to continue with the test and as such
        can be forgotten about. An example of this would be some that sends an email, or tars up
        files, it has all the information it needs and the main process doesn't need to wait for it
        to complete.
        """
        while not self._background_queue_shutdown:
            while not self._background_queue.empty():
                obj = self._background_queue.get()
                try:
                    local, globals_updates = self.process_callbacks(obj['cb'], obj['kwargs'])
                    with self.gdl:
                        self.global_data = recursive_update(self.global_data, globals_updates)
                except Exception as e:
                    self.log_message(e)
                self._background_queue.task_done()
            time.sleep(0.1)

    def zmq_event_handler(self, zmq_socket_address):
        """
        The ``zmq_event_handler`` thread receives (and responds to) updates from the
        zmq socket, which is normally embedded in the web server running alongside this
        riggerlib instance, in its own process.

        """
        ctx = zmq.Context()
        zmq_socket = ctx.socket(zmq.REP)
        zmq_socket.set(zmq.RCVTIMEO, 300)
        zmq_socket.bind(zmq_socket_address)

        def zmq_reply(message, **extra):
            payload = {'message': message}
            payload.update(extra)
            zmq_socket.send_json(payload)
        bad_request = partial(zmq_reply, 'BAD REQUEST')

        while not self._zmq_event_handler_shutdown:
#......... some code omitted here .........
Developer: psav, Project: riggerlib, Lines: 103, Source: server.py

Example 14: TCPHandler

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]

#......... some code omitted here .........

        fields = {
            'level': record.levelname,
            'filename': record.pathname,
            'lineno': record.lineno,
            'method': record.funcName,
        }
        if record.exc_info:
            fields['exception'] = str(record.exc_info)
            fields['traceback'] = format_exc(record.exc_info)

        log = {
            '@source_host': self.hostname,
            '@timestamp': timestamp,
            '@tags': [record.name],
            '@message': record.getMessage(),
            '@fields': fields,
        }
        return json.dumps(log)

    def emit(self, record):
        '''
        Send a LogRecord object formatted as json_event via a
        queue and worker thread.
        '''
        self.queue.put_nowait(record)

    def run(self):
        '''
        Main loop of the logger thread. All network I/O and exception handling
        originates here. Strings are consumed from self.queue and sent to
        self.sock, creating a new connection if necessary.

        If any exceptions are caught, the message is put() back on the queue
        and the exception is allowed to propagate up through
        logging.Handler.handleError(), potentially causing this thread to abort.
        '''
        INTERNAL_LOG.debug('Log I/O thread started')
        while True:
            record = self.queue.get()
            if record is None:
                break

            jsonrecord = self.jsonify(record)
            jsonrecord = '%s\n' % jsonrecord

            try:
                if self.sock is None:
                    self.connect()
                self.send(jsonrecord)
            except Exception:
                # This exception will be silently ignored and the message
                # requeued unless self.raiseExceptions=1
                self.queue.put(record)
                self.handleError(record)
            self.queue.task_done()
        INTERNAL_LOG.debug('Log I/O thread exited cleanly')

    def send(self, data):
        '''
        Keep calling SSLSocket.write until the entire message has been sent
        '''
        while len(data) > 0:
            if self.ssl_ca_file:
                sent = self.sock.write(data)
            else:
                sent = self.sock.send(data)
            data = data[sent:]
        self.connect_wait = BACKOFF_INITIAL

    def handleError(self, record):
        '''
        If an error occurs trying to send the log message, close the connection
        and delegate the exception handling to the superclass' handleError,
        which raises the exception (potentially killing the log thread) unless
        self.raiseExceptions is False.
        http://hg.python.org/cpython/file/e64d4518b23c/Lib/logging/__init__.py#l797
        '''
        INTERNAL_LOG.exception('Unable to send log')
        self.cleanup()
        self.connect_wait *= BACKOFF_MULTIPLE
        logging.Handler.handleError(self, record)

    def cleanup(self):
        '''
        If the socket to the server is still open, close it. Otherwise, do
        nothing.
        '''
        if self.sock:
            INTERNAL_LOG.info('Closing socket')
            self.sock.close()
            self.sock = None

    def close(self):
        '''
        Send a sentinel None object to the worker thread, telling it to exit
        and disconnect from the server.
        '''
        self.queue.put(None)
        self.cleanup()
Developer: uber, Project: clay, Lines: 104, Source: logger.py
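
run() combines two patterns worth noting: a None sentinel for shutdown, and requeue-on-error so a failed send is not lost. Reduced to an illustrative shape:

def pump(queue, send, handle_error):
    # Sketch of the retry loop used by run() above; all names are illustrative.
    while True:
        record = queue.get()
        if record is None:        # sentinel pushed by close()
            break
        try:
            send(record)
        except Exception:
            queue.put(record)     # requeue so the message survives the failure
            handle_error(record)  # may re-raise and end the thread
        queue.task_done()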

Example 15: GraphiteReporter

# Required import: from six.moves.queue import Queue [as alias]
# Or: from six.moves.queue.Queue import task_done [as alias]
class GraphiteReporter(threading.Thread):
  """A graphite reporter thread."""

  def __init__(self, host, port, maxQueueSize=10000):
    """Connect to a Graphite server on host:port."""
    threading.Thread.__init__(self)

    self.host, self.port = host, port
    self.sock = None
    self.queue = Queue()
    self.maxQueueSize = maxQueueSize
    self.daemon = True


  def run(self):
    """Run the thread."""
    while True:
      try:
        try:
          name, value, valueType, stamp = self.queue.get()
        except TypeError:
          break
        self.log(name, value, valueType, stamp)
      finally:
        self.queue.task_done()


  def connect(self):
    """Connects to the Graphite server if not already connected."""
    if self.sock is not None:
      return
    backoff = 0.01
    while True:
      try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        sock.connect((self.host, self.port))
        self.sock = sock
        return
      except socket.error:
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)


  def disconnect(self):
    """Disconnect from the Graphite server if connected."""
    if self.sock is not None:
      try:
        self.sock.close()
      except socket.error:
        pass
      finally:
        self.sock = None


  def _sendMsg(self, msg):
    """Send a line to graphite. Retry with exponential backoff."""
    if not self.sock:
      self.connect()
    if not isinstance(msg, binary_type):
      msg = msg.encode("UTF-8")

    backoff = 0.001
    while True:
      try:
        self.sock.sendall(msg)
        break
      except socket.error:
        log.warning('Graphite connection error', exc_info = True)
        self.disconnect()
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)
        self.connect()


  def _sanitizeName(self, name):
    """Sanitize a metric name."""
    return name.replace(' ', '-')


  def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value. The value type may be 'value',
    'count', or None."""
    if type(value) == float:
      form = "%s%s %2.2f %d\n"
    else:
      form = "%s%s %s %d\n"

    if valueType is not None and len(valueType) > 0 and valueType[0] != '.':
      valueType = '.' + valueType

    if not stamp:
      stamp = time.time()

    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))


  def enqueue(self, name, value, valueType=None, stamp=None):
    """Enqueue a call to log."""
    # If queue is too large, refuse to log.
#......... some code omitted here .........
Developer: Cue, Project: scales, Lines: 103, Source: util.py
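
Two details of run() are easy to miss: the shutdown sentinel works because unpacking None raises TypeError, and the try/finally guarantees task_done() runs even for that final get(). A hypothetical usage (host, port, and metric name are placeholders):

reporter = GraphiteReporter('graphite.example.com', 2003)
reporter.start()
reporter.enqueue('requests.latency', 12.5, valueType='value')
reporter.queue.put(None)  # sentinel: the unpacking in run() raises TypeError and the loop exits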


Note: The six.moves.queue.Queue.task_done method examples in this article were compiled by vimsky (纯净天空) from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from community-contributed open-source projects, and the copyright of each snippet belongs to its original author; consult the corresponding project's License before distributing or reusing the code. Do not reproduce this article without permission.