This page collects typical usage examples of the Python method glances.timer.Timer.reset. If you are wondering how to use Timer.reset, how it is called in practice, or what real-world examples look like, the hand-picked code samples below may help. You can also explore further usage examples of the class it belongs to, glances.timer.Timer.
The following shows 4 code examples of the Timer.reset method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
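Before looking at the full examples, the snippet below is a minimal sketch of the pattern they all share: create a Timer with a duration, wait until finished() returns True, do some work, then call reset() to restart the countdown. It assumes Glances is installed so that glances.timer.Timer can be imported; do_work is a hypothetical placeholder for the real periodic task.

import time

from glances.timer import Timer

def do_work():
    # Hypothetical placeholder for the periodic task
    print("refreshing stats...")

refresh = 3             # seconds between two updates
timer = Timer(refresh)  # the countdown starts as soon as the Timer is created

for _ in range(3):
    while not timer.finished():  # poll until the countdown expires
        time.sleep(0.1)
    do_work()
    timer.reset()                # re-arm the countdown for the next cycle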
Example 1: GlancesAmp
# Required module: from glances.timer import Timer [as alias]
# Or alternatively: from glances.timer.Timer import reset [as alias]
#......... some of the code is omitted here .........
        if self.enable():
            for k in ['regex', 'refresh']:
                if k not in self.configs:
                    logger.warning("AMP - {}: Can not find configuration key {} in section {}".format(self.NAME, k, self.amp_name))
                    self.configs['enable'] = 'false'
        else:
            logger.debug("AMP - {} is disabled".format(self.NAME))
        # Init the count to 0
        self.configs['count'] = 0
        return self.enable()

    def get(self, key):
        """Generic method to get an item from the AMP configuration."""
        if key in self.configs:
            return self.configs[key]
        else:
            return None

    def enable(self):
        """Return True|False if the AMP is enabled in the configuration file (enable=true|false)."""
        ret = self.get('enable')
        if ret is None:
            return False
        else:
            return ret.lower().startswith('true')

    def regex(self):
        """Return the regular expression used to identify the current application."""
        return self.get('regex')

    def refresh(self):
        """Return the refresh time in seconds for the current application monitoring process."""
        return self.get('refresh')

    def one_line(self):
        """Return True|False if the AMP should be displayed on one line (one_line=true|false)."""
        ret = self.get('one_line')
        if ret is None:
            return False
        else:
            return ret.lower().startswith('true')

    def time_until_refresh(self):
        """Return the time in seconds until the next refresh."""
        return self.timer.get()

    def should_update(self):
        """Return True if the AMP should be updated:
        - the AMP is enabled
        - only update every 'refresh' seconds
        """
        if self.timer.finished():
            self.timer.set(self.refresh())
            self.timer.reset()
            return self.enable()
        return False

    def set_count(self, count):
        """Set the number of processes matching the regex."""
        self.configs['count'] = count

    def count(self):
        """Get the number of processes matching the regex."""
        return self.get('count')

    def count_min(self):
        """Get the minimum number of processes."""
        return self.get('countmin')

    def count_max(self):
        """Get the maximum number of processes."""
        return self.get('countmax')

    def set_result(self, result, separator=''):
        """Store the result (string) into the result key of the AMP.
        If one_line is true, replace \n with the separator.
        """
        if self.one_line():
            self.configs['result'] = str(result).replace('\n', separator)
        else:
            self.configs['result'] = str(result)

    def result(self):
        """Return the result of the AMP (as a string)."""
        ret = self.get('result')
        if ret is not None:
            ret = u(ret)
        return ret

    def update_wrapper(self, process_list):
        """Wrapper for the children update."""
        # Set the number of running processes
        self.set_count(len(process_list))
        # Call the children update method
        if self.should_update():
            return self.update(process_list)
        else:
            return self.result()
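The Timer.reset call in this example lives in should_update(): the AMP re-reads its refresh period from the configuration, re-arms the countdown, and only then allows an update. Below is a hedged, stripped-down sketch of that gating idiom outside of the AMP machinery; ProcessWatcher and its refresh value are hypothetical names, not part of Glances.

from glances.timer import Timer

class ProcessWatcher(object):
    """Hypothetical example: run an expensive check at most every `refresh` seconds."""

    def __init__(self, refresh=30):
        self.refresh = refresh
        self.timer = Timer(refresh)

    def should_update(self):
        # Same gating idea as GlancesAmp.should_update(): once the countdown
        # expires, re-arm the timer and allow exactly one update.
        if self.timer.finished():
            self.timer.set(self.refresh)  # the period may have been reconfigured
            self.timer.reset()            # restart the countdown
            return True
        return False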
Example 2: Export
# Required module: from glances.timer import Timer [as alias]
# Or alternatively: from glances.timer.Timer import reset [as alias]
class Export(GlancesExport):
    """This class manages the Graph export module."""

    def __init__(self, config=None, args=None):
        """Init the export IF."""
        super(Export, self).__init__(config=config, args=args)

        # Load the Graph configuration file section (if it exists)
        self.export_enable = self.load_conf('graph',
                                            options=['path',
                                                     'generate_every',
                                                     'width',
                                                     'height',
                                                     'style'])

        # Manage options (command line arguments overwrite the configuration file)
        self.path = args.export_graph_path or self.path
        self.generate_every = int(getattr(self, 'generate_every', 0))
        self.width = int(getattr(self, 'width', 800))
        self.height = int(getattr(self, 'height', 600))
        self.style = getattr(pygal.style,
                             getattr(self, 'style', 'DarkStyle'),
                             pygal.style.DarkStyle)

        # Create the export folder
        try:
            os.makedirs(self.path)
        except OSError as e:
            if e.errno != errno.EEXIST:
                logger.critical("Cannot create the Graph output folder {} ({})".format(self.path, e))
                sys.exit(2)

        # Check if the output folder is writeable
        try:
            tempfile.TemporaryFile(dir=self.path)
        except OSError as e:
            logger.critical("Graph output folder {} is not writeable".format(self.path))
            sys.exit(2)

        logger.info("Graphs will be created in the {} folder".format(self.path))
        logger.info("Graphs will be created when 'g' key is pressed (in the CLI interface)")
        if self.generate_every != 0:
            logger.info("Graphs will be created automatically every {} seconds".format(self.generate_every))
            # Start the timer
            self._timer = Timer(self.generate_every)
        else:
            self._timer = None

    def exit(self):
        """Close the files."""
        logger.debug("Finalise export interface %s" % self.export_name)

    def update(self, stats):
        """Generate a Graph file in the output folder."""
        if self.generate_every != 0 and self._timer.finished():
            self.args.generate_graph = True
            self._timer.reset()

        if not self.args.generate_graph:
            return

        plugins = stats.getPluginsList()
        for plugin_name in plugins:
            plugin = stats._plugins[plugin_name]
            if plugin_name in self.plugins_to_export():
                self.export(plugin_name, plugin.get_export_history())

        logger.info("Graphs created in the folder {}".format(self.path))
        self.args.generate_graph = False

    def export(self, title, data):
        """Generate a graph from the data.
        Example for the mem plugin:
        {'percent': [
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 282070), 51.8),
            (datetime.datetime(2018, 3, 24, 16, 27, 47, 540999), 51.9),
            (datetime.datetime(2018, 3, 24, 16, 27, 50, 653390), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 53, 749702), 52.0),
            (datetime.datetime(2018, 3, 24, 16, 27, 56, 825660), 52.0),
            ...
            ]
        }
        Return:
        * True if the graph has been generated
        * False if the graph has not been generated
        """
        if data == {}:
            return False

        chart = DateTimeLine(title=title.capitalize(),
                             width=self.width,
                             height=self.height,
                             style=self.style,
                             show_dots=False,
                             legend_at_bottom=True,
                             x_label_rotation=20,
#......... some of the code is omitted here .........
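In Example 2, Timer.reset drives the automatic graph generation: when generate_every is non-zero, update() raises the generate_graph flag and re-arms the timer each time it expires. The following is a hedged sketch of that periodic-flag pattern on its own, independent of pygal and of the Glances export machinery; generate_report is a hypothetical stand-in for the real export code.

from glances.timer import Timer

def generate_report():
    # Hypothetical stand-in for the real graph generation
    print("generating graphs...")

generate_every = 60  # seconds; 0 would disable the automatic generation
timer = Timer(generate_every) if generate_every != 0 else None

def update():
    # Mirrors Export.update(): when the timer expires, raise a flag,
    # reset the timer, then let the rest of the function act on the flag.
    generate = False
    if timer is not None and timer.finished():
        generate = True
        timer.reset()
    if generate:
        generate_report()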
Example 3: GlancesProcesses
# Required module: from glances.timer import Timer [as alias]
# Or alternatively: from glances.timer.Timer import reset [as alias]
#......... some of the code is omitted here .........
                                                           self.sort_key,
                                                           self.sort_reverse,
                                                           self.no_kernel_threads,
                                                           excluded_processes)

            for i, node in enumerate(self.process_tree):
                # Only retrieve stats for visible processes (max_processes)
                if self.max_processes is not None and i >= self.max_processes:
                    break
                # Add standard stats
                new_stats = self.__get_process_stats(node.process,
                                                     mandatory_stats=False,
                                                     standard_stats=True,
                                                     extended_stats=False)
                if new_stats is not None:
                    node.stats.update(new_stats)
                # Add a specific time_since_update stats for bitrate
                node.stats['time_since_update'] = time_since_update
        else:
            # Process optimization
            # Only retrieve stats for visible processes (max_processes)
            if self.max_processes is not None:
                # Sort the internal dict and cut the top N (returns a list of tuples)
                # tuple = key (proc), dict (returned by __get_process_stats)
                try:
                    processiter = sorted(iteritems(processdict),
                                         key=lambda x: x[1][self.sort_key],
                                         reverse=self.sort_reverse)
                except (KeyError, TypeError) as e:
                    logger.error("Cannot sort process list by {0}: {1}".format(self.sort_key, e))
                    logger.error('{0}'.format(listitems(processdict)[0]))
                    # Fall back to all processes (issue #423)
                    processloop = iteritems(processdict)
                    first = False
                else:
                    processloop = processiter[0:self.max_processes]
                    first = True
            else:
                # Get all processes stats
                processloop = iteritems(processdict)
                first = False

            for i in processloop:
                # Already existing mandatory stats
                procstat = i[1]
                if self.max_processes is not None:
                    # Update with standard stats
                    # and extended stats but only for the TOP (first) process
                    s = self.__get_process_stats(i[0],
                                                 mandatory_stats=False,
                                                 standard_stats=True,
                                                 extended_stats=first)
                    if s is None:
                        continue
                    procstat.update(s)
                # Add a specific time_since_update stats for bitrate
                procstat['time_since_update'] = time_since_update
                # Update the process list
                self.processlist.append(procstat)
                # Next...
                first = False

        # Build the all-processes list used by the monitored list
        self.allprocesslist = itervalues(processdict)

        # Clean the internal caches if the timeout is reached
        if self.cache_timer.finished():
            self.username_cache = {}
            self.cmdline_cache = {}
            # Restart the timer
            self.cache_timer.reset()

    def getcount(self):
        """Get the number of processes."""
        return self.processcount

    def getalllist(self):
        """Get the allprocesslist."""
        return self.allprocesslist

    def getlist(self, sortedby=None):
        """Get the processlist."""
        return self.processlist

    def gettree(self):
        """Get the process tree."""
        return self.process_tree

    @property
    def sort_key(self):
        """Get the current sort key."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, key):
        """Set the current sort key."""
        self._sort_key = key
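Example 3 uses Timer.reset for cache expiry rather than for display refresh: once cache_timer has run out, the username and command-line caches are emptied and the countdown is restarted. Here is a hedged sketch of the same cache-invalidation idiom in isolation; CachedLookup and its compute callback are hypothetical names introduced for illustration.

from glances.timer import Timer

class CachedLookup(object):
    """Hypothetical cache that is flushed every `cache_timeout` seconds."""

    def __init__(self, cache_timeout=60):
        self.cache = {}
        self.cache_timer = Timer(cache_timeout)

    def get(self, key, compute):
        # Flush the cache and restart the countdown once the timeout is reached,
        # just as GlancesProcesses clears username_cache and cmdline_cache.
        if self.cache_timer.finished():
            self.cache = {}
            self.cache_timer.reset()
        if key not in self.cache:
            self.cache[key] = compute(key)
        return self.cache[key]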
Example 4: GlancesProcesses
# Required module: from glances.timer import Timer [as alias]
# Or alternatively: from glances.timer.Timer import reset [as alias]
class GlancesProcesses(object):
    """Get processed stats using the psutil library."""

    def __init__(self, cache_timeout=60):
        """Init the class to collect stats about processes."""
        # Add internal caches because psutil does not cache all the stats
        # See: https://code.google.com/p/psutil/issues/detail?id=462
        self.username_cache = {}
        self.cmdline_cache = {}

        # The internal caches will be cleaned every 'cache_timeout' seconds
        self.cache_timeout = cache_timeout
        self.cache_timer = Timer(self.cache_timeout)

        # Init the io dict
        # key = pid
        # value = [ read_bytes_old, write_bytes_old ]
        self.io_old = {}

        # Whether or not to enable the process tree
        self._enable_tree = False
        self.process_tree = None

        # Init stats
        self.auto_sort = True
        self._sort_key = 'cpu_percent'
        self.allprocesslist = []
        self.processlist = []
        self.reset_processcount()

        # Tag to enable/disable the processes stats (to reduce the Glances CPU consumption)
        # Default is to enable the processes stats
        self.disable_tag = False

        # Extended stats for the top process are enabled by default
        self.disable_extended_tag = False

        # Maximum number of processes shown in the UI (None if no limit)
        self._max_processes = None

        # The process filter is a regular expression
        self._filter = GlancesFilter()

        # Whether or not to hide kernel threads
        self.no_kernel_threads = False

        # Store maximum values in a dict
        # Used in the UI to highlight the maximum value
        self._max_values_list = ('cpu_percent', 'memory_percent')
        # { 'cpu_percent': 0.0, 'memory_percent': 0.0 }
        self._max_values = {}
        self.reset_max_values()

    def reset_processcount(self):
        self.processcount = {'total': 0,
                             'running': 0,
                             'sleeping': 0,
                             'thread': 0,
                             'pid_max': None}

    def enable(self):
        """Enable process stats."""
        self.disable_tag = False
        self.update()

    def disable(self):
        """Disable process stats."""
        self.disable_tag = True

    def enable_extended(self):
        """Enable extended process stats."""
        self.disable_extended_tag = False
        self.update()

    def disable_extended(self):
        """Disable extended process stats."""
        self.disable_extended_tag = True

    @property
    def pid_max(self):
        """
        Get the maximum PID value.

        On Linux, the value is read from the `/proc/sys/kernel/pid_max` file.

        From `man 5 proc`:
        The default value for this file, 32768, results in the same range of
        PIDs as on earlier kernels. On 32-bit platforms, 32768 is the maximum
        value for pid_max. On 64-bit systems, pid_max can be set to any value
        up to 2^22 (PID_MAX_LIMIT, approximately 4 million).

        If the file is unreadable or not available for whatever reason,
        returns None.

        Some other OSes:
        - On FreeBSD and macOS the maximum is 99999.
        - On OpenBSD >= 6.0 the maximum is 99999 (was 32766).
        - On NetBSD the maximum is 30000.
#......... some of the code is omitted here .........
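For reference, the Timer API exercised by the four examples is small: a constructor taking a duration, plus finished(), set(), reset() and get(). The sketch below is a plausible reconstruction inferred only from how those methods are called above; it is not copied from Glances, so consult glances/timer.py for the authoritative implementation.

from time import time

class Timer(object):
    """A simple countdown timer (sketch inferred from the examples above)."""

    def __init__(self, duration):
        self.duration = duration
        self.start()

    def start(self):
        # Remember the point in time at which the countdown expires
        self.target = time() + self.duration

    def reset(self):
        # Restart the countdown using the current duration
        self.start()

    def set(self, duration):
        # Change the duration used by the next reset()/start()
        self.duration = duration

    def get(self):
        # Seconds elapsed since the timer was last (re)started
        return self.duration - (self.target - time())

    def finished(self):
        return time() > self.target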