本文整理汇总了Python中cpppo.timer函数的典型用法代码示例。如果您正苦于以下问题:Python timer函数的具体用法?Python timer怎么用?Python timer使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了timer函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check
def check( self, predicate, deadline=None ):
    """Poll 'predicate' every self.rate seconds until it returns a truthy value, or 'deadline'
    (an absolute cpppo.timer() time) passes.  Returns the final predicate result: truthy on
    success, falsy on timeout."""
    satisfied = predicate()
    while not satisfied:
        if deadline is not None and cpppo.timer() >= deadline:
            break
        # Sleep one poll period, but never past the deadline (and never a -'ve duration)
        if deadline is None:
            delay = self.rate
        else:
            delay = min( self.rate, max( 0, deadline - cpppo.timer() ))
        time.sleep( delay )
        satisfied = predicate()
    return satisfied
示例2: complete
def complete( self, actuator=1, svoff=False, timeout=None ):
    """Wait (up to 'timeout', default self.TIMEOUT) for any prior operation on the actuator to
    finish; return True iff the current operation is detected as being complete.

    According to the documentation, the absence of the X4B "INP" flag should indicate
    completion, (see LEC Modbus RTU op Manual.pdf, section 4.4).  However, this does not work,
    and the X48 "BUSY" flag seems to serve this purpose; perhaps it is a documentation error.

    If 'svoff' is True, we'll also turn off the servo (clear Y19_SVON) if we detect completion.
    """
    started = cpppo.timer()
    if timeout is None:
        timeout = self.TIMEOUT
    unit = self.unit( uid=actuator )
    # Loop on True/None; terminate only on False; X48_BUSY contains 0/False when complete
    finished = self.check(
        predicate=lambda: unit.read( data.X48_BUSY.addr ) == False,
        deadline=None if timeout is None else started + timeout )
    emit = logging.detail if finished else logging.warning
    emit( "Complete: actuator %3d %s", actuator, "success" if finished else "failure" )
    if svoff and finished:
        logging.detail( "ServoOff: actuator %3d", actuator )
        unit.write( data.Y19_SVON.addr, 0 )
    return finished
示例3: _recv
def _recv( self, size ):
    """On a receive timeout, closes the socket and raises a ConnectionException.  Otherwise,
    returns the available input.

    size -- maximum number of bytes to receive (passed through to the superclass _recv)

    Uses select w/ the computed remaining transaction timeout to bound the wait for input."""
    if not self.socket:
        # BUGFIX: was self.__str_() -- a non-existent (and class-private name-mangled) method,
        # which raised AttributeError instead of the intended ConnectionException.
        raise ConnectionException( str( self ))
    begun = cpppo.timer()
    timeout = self.timeout # This computes the remaining timeout available
    r, w, e = select.select( [self.socket], [], [], timeout )
    if r:
        result = super( ModbusTcpClientTimeout, self )._recv( size )
        log.debug( "Receive success in %7.3f/%7.3fs" % ( cpppo.timer() - begun, timeout ) )
        return result
    self.close()
    log.debug( "Receive failure in %7.3f/%7.3fs" % ( cpppo.timer() - begun, timeout ) )
    raise ConnectionException("Receive from (%s, %s) failed: Timeout" % (
        self.host, self.port ))
示例4: test_smc_basic
def test_smc_basic( simulated_actuator_1, simulated_actuator_2 ):
    """Verify that an smc_modbus positioner polls a simulated actuator's position, and that a
    change written to the simulated position registers is picked up by subsequent polls."""
    command,address = simulated_actuator_1
    command,address = simulated_actuator_2

    positioner = smc.smc_modbus()

    '''
    # Initiate polling of actuator 2
    assert positioner.status( actuator=2 )['current_position'] is None
    '''

    # Await (up to 1s) the first successful poll of actuator 1's current position
    status = None
    deadline = cpppo.timer() + 1
    while cpppo.timer() < deadline:
        if status and status['current_position'] is not None:
            break
        time.sleep( .1 )
        status = positioner.status( actuator=1 )
    assert status['current_position'] == 0

    # Modify actuator 1 current position (0x3a98 == 15000, split across two registers)
    unit = positioner.unit( uid=1 )
    unit.write( 40001 + 0x9000, 0x0000 )
    unit.write( 40001 + 0x9001, 0x3a98 )

    # make certain it gets polled correctly with updated value
    status = None
    deadline = cpppo.timer() + 1
    while cpppo.timer() < deadline:
        if status and status['current_position'] == 15000:
            break
        time.sleep( .1 )
        status = positioner.status( actuator=1 )
    assert status['current_position'] == 15000

    '''
    # but the unmodified actuator should still now be polling a 0...
    assert positioner.status( actuator=2 )['current_position'] is 0
    '''

    positioner.close()
示例5: connect
def connect(self):
    """Duplicate the functionality of connect (handling optional .source_address attribute added
    in pymodbus 1.2.0), but pass the computed remaining timeout.

    Returns True iff a socket connection is (or already was) established."""
    if self.socket:
        return True
    log.debug( "Connecting to (%s, %s)" % ( self.host, self.port ))
    begun = cpppo.timer()
    timeout = self.timeout # This computes the remaining timeout available
    try:
        self.socket = socket.create_connection( (self.host, self.port),
            timeout=timeout, source_address=getattr( self, 'source_address', None ))
    except socket.error as exc:
        # Connection failed; leave self.socket falsy so we return False below
        log.debug('Connection to (%s, %s) failed: %s' % (
            self.host, self.port, exc ))
        self.close()
    finally:
        log.debug( "Connect completed in %.3fs" % ( cpppo.timer() - begun ))
    return self.socket is not None  # was '!= None'; identity test is the Python idiom
示例6: timeout
def timeout( self ):
    """Return the remaining hard timeout if one was specified (timeout=True|#.#); otherwise the
    Defaults.Timeout.  Returns 0 once the hard timeout has expired."""
    if self._timeout in (None, True):
        log.debug( "Transaction timeout default: %.3fs" % ( Defaults.Timeout ))
        return Defaults.Timeout
    # A hard timeout was started; compute what remains of it
    remaining = self._started + self._timeout - cpppo.timer()
    if remaining > 0:
        log.debug( "Transaction timeout remaining: %.3fs" % ( remaining ))
        return remaining
    log.debug( "Transaction timeout expired" )
    return 0
示例7: logix_remote
def logix_remote( count, svraddr, kwargs ):
    """Issue 'count' Read Tag Fragmented requests against the EtherNet/IP CIP server at
    'svraddr', after validating Register request encoding and establishing a session; always
    signals the server to terminate on the way out.

    count   -- number of timed read requests to issue
    svraddr -- (host, port) address of the target server
    kwargs  -- must carry kwargs['server']['control'], used to signal server shutdown
    """
    # BUGFIX: 'await' became a reserved keyword in Python 3.7, so 'client.await( ... )' is a
    # SyntaxError on modern Pythons; cpppo renamed the helper to client.await_response.
    # Accept either vintage of the cpppo client API, preferring the modern name.
    client_await = getattr( client, 'await_response', None ) or getattr( client, 'await' )
    try:
        time.sleep(.25) # Wait for server to be established
        # Confirm that a known Register encodes as expected
        data = cpppo.dotdict()
        data.enip = {}
        data.enip.options = 0
        data.enip.session_handle = 0
        data.enip.status = 0
        data.enip.sender_context = {}
        data.enip.sender_context.input = bytearray( [0x00] * 8 )
        data.enip.CIP = {}
        data.enip.CIP.register = {}
        data.enip.CIP.register.options = 0
        data.enip.CIP.register.protocol_version = 1
        data.enip.input = bytearray( enip.CIP.produce( data.enip ))
        data.input = bytearray( enip.enip_encode( data.enip ))
        log.normal( "Register Request: %r" % data )
        assert bytes( data.input ) == rss_004_request
        # Try to Register a real session, followed by commands
        timeout = 5
        begun = cpppo.timer()
        cli = client.client( host=svraddr[0], port=svraddr[1] )
        assert cli.writable( timeout=timeout )
        elapsed = cpppo.timer() - begun
        log.normal( "Client Connected in %7.3f/%7.3fs" % ( elapsed, timeout ))
        begun = cpppo.timer()
        with cli:
            cli.register( timeout=timeout )
            data,elapsed = client_await( cli, timeout=timeout )
        log.normal( "Client Register Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )
        assert data is not None and 'enip.CIP.register' in data, "Failed to receive Register response"
        assert data.enip.status == 0, "Register response indicates failure: %s" % data.enip.status
        # Establish the EtherNet/IP "session handle" used by all further requests
        cli.session = data.enip.session_handle
        start = cpppo.timer()
        with cli:
            for _ in range( count ):
                begun = cpppo.timer()
                cli.read( path=[{'symbolic': 'SCADA'}, {'element': 12}],
                          elements=201, offset=2, timeout=timeout )
                data,elapsed = client_await( cli, timeout=timeout )
                log.normal( "Client ReadFrg. Rcvd %7.3f/%7.3fs: %r", elapsed, timeout, data )
        duration = cpppo.timer() - start
        log.warning( "Client ReadFrg. Average %7.3f TPS (%7.3fs ea)." % ( count / duration, duration / count ))
        log.normal( "Signal shutdown w/ server.control in object %s", id( kwargs['server']['control'] ))
    finally:
        kwargs['server']['control'].done= True # Signal the server to terminate
示例8: remember
def remember( self, what, last, chng ):
    """Record a change event at the head of ._events (newest first), if event recording is
    active (._events is not None) and the computed level is non-negative."""
    if self._events is None:
        return
    # ._level may be a constant, or a callable computing severity from the change
    level = self._level( what, last, chng ) if callable( self._level ) else self._level
    if level is None or level < 0:
        return  # may be +'ve/0/None, or -'ve (ignored)
    if self._formatter:
        message = self._formatter( what, last, chng )
    else:
        message = "%s (was %s)" % ( cpppo.reprlib.repr( chng ), cpppo.reprlib.repr( last ))
    self._events.insert( 0, {
        "time":		cpppo.timer(),
        "level":	level,
        "group":	self._group,
        "description":	self._descr,
        "message":	message,
    } )
示例9: signal_service
def signal_service():
    """Service known signals: logging level in/decrease, log rotation, and uptime reporting.
    When logging, default to logat NORMAL, but ensure the message is seen if higher
    (eg. WARNING).  Support being in unknown logging levels when in/decreasing."""
    global levelmap_change
    if levelmap_change:
        root = logging.getLogger()
        current = root.getEffectiveLevel()
        # Find the levelmap key whose level is nearest the current effective level, then step it
        nearest = min( logging_levelmap.values(), key=lambda lvl: abs( lvl - current ))
        topkey = max( logging_levelmap.keys() )
        for key, lvl in logging_levelmap.items():
            if lvl == nearest:
                target = key + levelmap_change
                break
        # Off either end of the map: clamp to DEBUG (above top key) or ERROR (below)
        goal = logging_levelmap.get( target, logging.DEBUG if target > topkey else logging.ERROR )
        if current != goal:
            root.setLevel( goal )
        levelmap_change = 0
    global logrotate_signalled
    global uptime_signalled
    if logrotate_signalled:
        logrotate_signalled = False
        uptime_signalled = True  # a rotation also triggers an uptime report
        root = logging.getLogger()
        current = root.getEffectiveLevel()
        root.log( max( logging.NORMAL, current ), "Rotating log files due to signal" )
        for handler in logging.root.handlers:
            if isinstance( handler, logging.FileHandler ):
                handler.close()
    global uptime_basis
    if uptime_signalled:
        uptime_signalled = False
        uptime = cpppo.timer() - uptime_basis
        root = logging.getLogger()
        current = root.getEffectiveLevel()
        root.log( max( logging.NORMAL, current ), "Uptime: %3d:%02d:%06.3f",
                  int( uptime // 3600 ), int( uptime % 3600 // 60 ), uptime % 60 )
def _receive( self ):
    """Deliver any previously cached writes that have "arrived", with a latency of roughly 1/2
    the specified polling rate.  Does nothing if the next simulated poll cycle isn't due."""
    now = cpppo.timer()
    if self._polled + self.rate > now:
        return  # next poll cycle not yet due
    log.debug( "%s polled" % ( self.description ))
    self._polled += self.rate
    for addr, pending in self._cache.items():
        while pending:
            when, val = pending[0]
            if when > self._polled:
                break  # not yet "arrived" as of this poll time
            # When offline, any value other than the last one written to the
            # PLC is lost; _store will discard if offline, logging with -x>
            # indicates that we'll never see it.
            if self.online or len( pending ) > 1:
                self._store( addr, val )
            if not self.online:
                break  # offline: leave the (possibly last) entry queued
            pending.popleft()
示例11: events
def events( self, since=None, purge=False ):
    """A generator yielding the stream of relevant events (none if 'since' is None, otherwise
    limited to those with a ["time"] strictly greater than 'since'), optionally purging
    duplicate-description events older than ._retain as we go.  If the iteration completes,
    our ._events is replaced with the retained list.  Not a property, because it doesn't
    return a simple value and takes optional arguments."""
    seen = set()
    keep = []
    now = cpppo.timer()
    for evt in self._events:
        if self._retain:
            if evt["description"] in seen:
                if evt["time"] + self._retain < now:
                    # old event, and we've seen this description before; skip
                    log.debug( "Purging event: %r" % ( evt ))
                    continue
            else:
                seen.add( evt["description"] )
        keep.append( evt )
        if since is not None and evt["time"] > since:
            yield evt
    if self._retain:
        self._events= keep
示例12: test_history_unparsable
def test_history_unparsable():
"""Test history files rendered unparsable due to dropouts. This should be handled with no problem
except if the initial frame of register data on the first file is missing.
"""
for _ in range( 3 ):
path = "/tmp/test_unparsable_%d" % random.randint( 100000, 999999 )
if os.path.exists( path ):
continue
assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path
files = []
try:
# Create a series of history files with decreasing timestamps as the numeric extension
# increases, containing records that are invalid.
now = timer()
v = 10000
secs = 10
secs_ext = 1.0 # adjust range of history to target out by this +/-
basisext = .5 # adjust start basis time from now by this +/-
minfactor = .25
maxfactor = 2.0
maxlatency = .25
# 1/N file lines corrupted (kills 2 records; the current and following). 0 --> no errors
maxerror = random.choice( [ None, 3, 10, 100 ] )
oldest = None
newest = None
logging.normal( "Corrupting %s of all history lines", None if not maxerror else "1/%d" % maxerror )
for e in range( secs ):
f = path + (( '.%d' % e ) if e else '') # 0'th file has no extension
files.append( f )
with logger( f ) as l:
ssend = 100
for ss in range( 0, ssend ): # subseconds up to but not including ssend...
js = json.dumps( { 40001: v + e * 1000 + (ss * 1000 // ssend) } ) + '\n'
if maxerror and not random.randint( 0, maxerror ):
# Truncate some of the records (as would occur in a filesystem full or halt)
js = js[:random.randint( 0, len( js ) - 1)]
ts = timestamp( now - e + ss/ssend )
if oldest is None or ts < oldest:
oldest = ts
if newest is None or ts > newest:
newest = ts
l._append( '\t'.join( (str( ts ),json.dumps( None ),js) ) )
# Load the historical records. This will be robust against all errors except if the first
# line of the first history file opened is corrupt, and we therefore cannot get the initial
# frame of register data.
historical = timestamp( now - random.uniform( -secs_ext, secs + secs_ext ))
basisdelay = random.uniform( -basisext, +basisext )
basis = now + basisdelay
factor = random.uniform( minfactor, maxfactor )
lookahead = 1.0
on_bad_iframe = random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
on_bad_data = random.choice( (loader.RAISE, loader.FAIL, loader.SUPPRESS, loader.SUPPRESS, loader.SUPPRESS) )
logging.normal( "Playback starts %s (%.1f%%) of history %s-%s, in %.3fs, at x %.2f rate w/%.1fs lookahead, on_bad_iframe=%s, on_bad_data=%s",
historical, ( historical.value - oldest.value ) * 100 / ( newest.value - oldest.value ),
oldest, newest, basisdelay, factor, lookahead,
"SUPPRESS" if on_bad_iframe == loader.SUPPRESS else "FAIL" if on_bad_iframe == loader.FAIL else "RAISE",
"SUPPRESS" if on_bad_data == loader.SUPPRESS else "FAIL" if on_bad_data == loader.FAIL else "RAISE" )
ld = loader( path,
historical=historical, basis=basis, factor=factor, lookahead=lookahead )
dur = basisext + ( secs_ext + secs + secs_ext ) / factor + basisext + 2*maxlatency # Don't be tooo strict
beg = timer()
count = 0
while ld:
assert timer() - beg < dur, "The loader should have ended"
cur,events = ld.load( on_bad_iframe=on_bad_iframe, on_bad_data=on_bad_data )
count += len( events )
logging.normal( "%s loaded up to %s; %d future, %d values: %d events: %s",
ld, cur, len( ld.future ), len( ld.values ), len( events ),
repr( events ) if logging.root.isEnabledFor( logging.DEBUG ) else reprlib.repr( events ))
time.sleep( random.uniform( 0.0, maxlatency ))
if on_bad_data == ld.FAIL or on_bad_iframe == ld.FAIL:
assert ld.state in (ld.COMPLETE, ld.FAILED)
else:
assert ld.state == ld.COMPLETE
except IframeError as exc:
logging.warning( "Detected error on initial frame of registers in first history file; failure expected: %s", exc )
assert ld.state == ld.FAILED and count == 0, "Shouldn't have loaded any events -- only iframe failures expected"
except DataError as exc:
logging.warning( "Detected error on registers data in a history file; failure expected: %s", exc )
assert ld.state == ld.FAILED
except Exception as exc:
logging.normal( "Test failed: %s", exc )
for f in files:
if os.path.exists( f ):
logging.normal( "%s:\n %s", f, " ".join( l for l in open( f )))
else:
logging.warning( "%s: Couldn't find file", f )
raise
finally:
for f in files:
#.........这里部分代码省略.........
示例13: test_history_sequential
def test_history_sequential():
    """Exercise the history reader over a series of single-record files, repeatedly opening the
    next file strictly after the last timestamp seen, until HistoryExhausted is raised."""
    # Find an unused base path in /tmp (up to 3 attempts; asserted below)
    for _ in range( 3 ):
        path = "/tmp/test_sequential_%d" % random.randint( 100000, 999999 )
        if os.path.exists( path ):
            continue
    assert not os.path.exists( path ), "Couldn't find an unused name: %s" % path
    files = []					# every file created, for cleanup in finally
    try:
        # Create a series of history files with decreasing timestamps as the numeric extension
        # increases.  Note: times are truncated to milliseconds, so timestamps saved out will
        # probably evaluate as < the original value when read back in!  Since each file contains
        # only one record, we must be careful to use 'strict', to ensure we open the next file
        # strictly greater than the last timestamp (or we'll open the same file again!)
        now = timer()
        count = 10
        for e in range( count ):
            f = path + (( '.%d' % e ) if e else '') # 0'th file has 0 extension
            files.append( f )
            with logger( f ) as l:
                # one record per file; values decrease w/ age (oldest file holds smallest value)
                l.write( { 40001: count - e }, now=now - e )
            if e:
                # Compress .1 onward using a random format; randomly delete origin uncompressed file
                # so sometimes both files exist
                if random.choice( (True, False, False, False) ):
                    continue # Don't make a compressed version of some files
                fz = f + '.%s' % random.choice( ('gz', 'bz2', 'xz') )
                files.append( fz )
                with opener( fz, mode='wb' ) as fd:
                    fd.write( open( f, 'rb' ).read() )
                if random.choice( (True, False, False) ):
                    continue # Don't remove some of the uncompressed files
                os.unlink( f )
                files.pop( files.index( f ))
        # Attempt to begin loading history around the middle of the recording
        rdr = reader( path,
                      historical=now - random.uniform( 3.0, 9.0 ),
                      basis=now + random.uniform( -.5, +.5 ),
                      factor=3 )
        # Begin with the first historical file before our computed advancing historical time (we
        # could provide a specific timestamp here, if we wanted).  No lookahead.
        ts_l = None				# last timestamp yielded (None 'til first record)
        f_l = None				# file that yielded it
        after = False				# only first open is "before"; rest are "after"
        strict = False				# only goes false when timestamp increases in the same file
        deadline = now + count			# overall bound on the polling loop
        while timer() <= deadline:
            # open next file beginning after the last ts
            o = rdr.open( target=ts_l, after=after, strict=strict )	# Generator; doesn't do much here...
            after = True
            strict = True
            for (f,l,cur),(ts,js) in o: # raises HistoryExhausted on open() generator failure
                assert ts_l is None or ts >= ts_l, \
                    "Historical record out of sequence; %s isn't >= %s" % ( ts, ts_l )
                ts_l = ts
                if js is None:
                    # record's timestamp is still in the (advancing historical) future; wait
                    logging.info( "@%s: not yet available", ts )
                    assert ts > cur, "Next record should have been returned; not in future"
                    time.sleep( .1 )
                else:
                    logging.normal( "@%s: %r", ts, js )
                    assert ts <= cur, "Next record shouldn't have been returned; yet future"
                # NOTE(review): ts_l was just assigned ts above, so 'ts > ts_l' is always False
                # here and 'strict' is never cleared -- confirm whether this comparison was
                # intended against the *previous* ts_l (i.e. before the assignment above).
                if f == f_l and ts > ts_l:
                    strict = False
                f_l,ts_l= f,ts
        assert False, "Should have raised HistoryExhausted by now"
    except HistoryExhausted as exc:
        logging.normal( "History exhausted: %s", exc )
    except Exception as exc:
        logging.normal( "Test failed: %s", exc )
        raise
    finally:
        # Best-effort cleanup of every file created above (some may already be unlinked)
        for f in files:
            logging.detail( "unlinking %s", f )
            try:
                os.unlink( f )
            except:
                pass
示例14: test_history_timestamp
#.........这里部分代码省略.........
assert not(ts > ts + l)
assert ts > ts - l
assert ts >= ts + s
assert ts >= ts - s
assert not(ts >= ts + l)
assert ts >= ts - l
finally:
timestamp._precision,timestamp._epsilon = save
# Maintain DST specificity when rendering in DST-specific timezones? Nope, only when using
# specially constructed non-DST versions of timezones, when they are made available by pytz.
timestamp.support_abbreviations( None, reset=True )
assert timestamp.timezone_info('MST') == (pytz.timezone( 'MST' ),None)
assert timestamp( 1399326141.999836 ).render(
tzinfo='MST', ms=False ) == '2014-05-05 14:42:21 MST'
# Get MST/MDT etc., and CET/CEST abbreviations
timestamp.support_abbreviations( ['CA','Europe/Berlin'], reset=True )
assert timestamp.timezone_info('MST') == (pytz.timezone( 'America/Edmonton' ),False)
assert timestamp( 1399326141.999836 ).render(
tzinfo='MST', ms=False ) == '2014-05-05 15:42:21 MDT'
# $ TZ=UTC date [email protected]
# Wed Jan 1 07:00:00 UTC 2014
# 1396531199
# Thu Apr 3 07:19:59 MDT 2014
assert '2014-01-02 03:04:55.123'.translate( trtab ) == '2014 01 02 03 04 55 123'
cnt = 10000
beg = timer()
for _ in range( cnt ):
utc1 = utc_strp( '2014-01-02 03:04:55.123' )
dur1 = timer() - beg
beg = timer()
for _ in range( cnt ):
utc2 = utc_trns( '2014-01-02 03:04:55.123' )
dur2 = timer() - beg
beg = timer()
for _ in range( cnt ):
utc3 = timestamp.datetime_from_string( '2014-01-02 03:04:55.123' )
dur3 = timer() - beg
assert utc1.strftime( timestamp._fmt ) \
== utc2.strftime( timestamp._fmt ) \
== utc3.strftime( timestamp._fmt ) == '2014-01-02 03:04:55'
logging.detail( "strptime: %d/s, translate: %d/s, timestamp: %d/s", cnt/dur1, cnt/dur2, cnt/dur3 )
now = timer()
assert timestamp( now ) < timestamp( now + 1 )
# From a numeric timestamp
ts = timestamp( 1396531199 )
assert ts.utc == '2014-04-03 13:19:59.000' == str( ts )
assert ts.local in ( '2014-04-03 07:19:59 MDT',
'2014-04-03 06:19:59 MST',
'2014-04-03 13:19:59 UTC' )
# From a string UTC time
dt = timestamp.datetime_from_string( '2014-01-01 07:00:00.0' )
assert str( dt ) == '2014-01-01 07:00:00+00:00'
assert repr( dt ) == 'datetime.datetime(2014, 1, 1, 7, 0, tzinfo=<UTC>)'
#assert dt.strftime( '%s' ) != '1388559600' # !? (will fail if machine is in UTC timezone )
示例15: _poller
def _poller( self ):
"""Asynchronously (ie. in another thread) poll all the specified registers, on the designated
poll cycle. Until we have something to do (self.rate isn't None), just wait.
We'll log whenever we begin/cease polling any given range of registers.
"""
target = cpppo.timer()
while not self.done and logging: # Module may be gone in shutting down
# Poller is dormant 'til a non-None/zero rate and data specified
if not self.rate or not self._data:
time.sleep( .1 )
continue
# Delay 'til poll target
now = cpppo.timer()
if now < target:
time.sleep( target - now )
now = cpppo.timer()
# Ready for another poll. Check if we've slipped (missed cycle(s)), and then compute
# the next poll cycle target; this attempts to retain cadence.
slipped = int( ( now - target ) / self.rate )
if slipped:
log.normal( "Polling slipped; missed %d cycles" % ( slipped ))
target += self.rate * ( slipped + 1 )
# Perform polls, re-acquiring lock between each poll to allow others
# to interject. We'll sort the known register addresses in _data,
# merge ranges, read the values from the PLC, and store them in
# _data.
# TODO: Split on and optimize counts for differing multi-register
# limits for Coils, Registers
# WARN: list comprehension over self._data must be atomic, because
# we don't lock, and someone could call read/poll, adding entries to
# self._data between reads. However, since merge's register ranges
# are sorted, all self._data keys are consumed before the list is
# iterated.
rngs = set( merge( ( (a,1) for a in self._data ), reach=self.reach ))
succ = set()
fail = set()
busy = 0.0
for address, count in rngs:
with self.lock:
begin = cpppo.timer()
try:
# Read values; on success (no exception, something other
# than None returned), immediately take online;
# otherwise attempts to _store will be rejected.
value = self._read( address, count )
if not self.online:
self.online = True
log.critical( "Polling: PLC %s online; success polling %s: %s" % (
self.description, address, cpppo.reprlib.repr( value )))
if (address,count) not in self.polling:
log.detail( "Polling %6d-%-6d (%5d)" % ( address, address+count-1, count ))
succ.add( (address, count) )
self._store( address, value ) # Handle scalar or list/tuple value(s)
except ModbusException as exc:
# Modbus error; Couldn't read the given range. Only log
# the first time failure to poll this range is detected
fail.add( (address, count) )
if (address, count) not in self.failing:
log.warning( "Failing %6d-%-6d (%5d): %s" % (
address, address+count-1, count, str( exc )))
except Exception as exc:
# Something else; always log
fail.add( (address, count) )
log.warning( "Failing %6d-%-6d (%5d): %s" % (
address, address+count-1, count, traceback.format_exc() ))
busy += cpppo.timer() - begin
# Prioritize other lockers (ie. write). Contrary to popular opinion, sleep(0) does
# *not* effectively yield the current Thread's quanta, at least on Python 2.7.6!
time.sleep(0.001)
# We've already warned about polls that have failed; also log all
# polls that have ceased (failed, or been replaced by larger polls)
ceasing = self.polling - succ - fail
for address, count in ceasing:
log.info( "Ceasing %6d-%-6d (%5d)" % ( address, address+count-1, count ))
self.polling = succ
self.failing = fail
self.duration = busy
# The "load" is computed by comparing the "duration" of the last poll vs. the target
# poll rate (in seconds). A load of 1.0 indicates the polls consumed exactly 100% of
# the target rate. Compute loads over approximately the last 1, 5 and 15 minutes worth
# of polls. The load is the proportion of the current poll rate that is consumed by
# poll activity. Even if the load < 1.0, polls may "slip" due to other (eg. write)
# activity using PLC I/O capacity.
load = ( busy / self.rate ) if self.rate > 0 else 1.0
ppm = ( 60.0 / self.rate ) if self.rate > 0 else 1.0
self.load = tuple(
cpppo.exponential_moving_average( cur, load, 1.0 / ( minutes * ppm ))
for minutes,cur in zip((1, 5, 15), self.load ))
#.........这里部分代码省略.........