This article collects typical usage examples of the Python method vsc.utils.cache.FileCache.load. If you are wondering what FileCache.load does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, vsc.utils.cache.FileCache.
The text below shows 13 code examples of FileCache.load, sorted by popularity by default.
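Before the individual examples, here is a minimal sketch of the update/load round trip that most of them rely on. It is distilled from the examples below (in particular Example 4 and Example 13) rather than taken from library documentation; the key name 'mykey' and the scratch file are purely illustrative. FileCache.update() stores a value under a key together with a timestamp, close() writes the cache to disk, and load(key) returns a (timestamp, value) tuple, or None when the key is absent.

import os
import tempfile
import time

from vsc.utils.cache import FileCache

# create a scratch file name for the cache (illustrative only)
(handle, filename) = tempfile.mkstemp()
os.close(handle)
os.unlink(filename)

# write: store a value under a key, then persist the cache to disk
cache = FileCache(filename)
cache.update('mykey', {'answer': 42}, 0)  # threshold 0: always overwrite, as in Example 13
cache.close()

# read: load() returns (timestamp, value) for a known key, None otherwise
cache = FileCache(filename)
info = cache.load('mykey')
if info is None:
    print("no entry found under 'mykey'")
else:
    (timestamp, value) = info
    print("cached at %s: %s" % (time.ctime(timestamp), value))

os.unlink(filename)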
Example 1: print_user_quota
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def print_user_quota(opts, storage, user_name, now):
    """
    Print the quota for the user, i.e., USR quota in all filesets the user has access to.
    """
    print "User quota:"

    for storage_name in opts.options.storage:
        mount_point = storage[storage_name].login_mount_point
        path_template = storage.path_templates[storage_name]['user']
        path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".quota_user.json.gz")

        cache = FileCache(path, True)
        try:
            (timestamp, quota) = cache.load('quota')
        except TypeError:
            logger.debug("Cannot load data from %s" % (path,))
            print "%s: WARNING: No quota information found" % (storage_name,)
            continue

        if now - timestamp > opts.options.threshold:
            print "%s: WARNING: no recent quota information (age of data is %d minutes)" % (storage_name, (now - timestamp) / 60)
        else:
            for (fileset, qi) in quota.quota_map.items():
                pp = quota_pretty_print(storage_name, fileset, qi, opts.options.fileset_prefixes)
                if pp:
                    print pp
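Example 1 leans on the fact that FileCache.load() returns None for a key that is not in the cache: unpacking None into (timestamp, quota) raises TypeError, which the except clause turns into a per-storage warning instead of a crash. The behaviour can be reproduced in isolation (the cache path is made up for illustration):

cache = FileCache("/tmp/empty_quota_cache", True)  # hypothetical path, cache starts out empty
try:
    (timestamp, quota) = cache.load('quota')
except TypeError:
    # load() returned None because there is no 'quota' entry yet
    print("no cached quota entry found")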
Example 2: main
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def main():
    options = {
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'threshold': ('the allowed time difference between the cached quota information and the time of running', None, 'store',
                      DEFAULT_ALLOWED_TIME_THRESHOLD),
    }
    opts = simple_option(options, config_files='/etc/quota_information.conf')

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]
    now = time.time()

    for storage_name in opts.options.storage:
        mount_point = storage[storage_name].login_mount_point
        path_template = storage.path_templates[storage_name]['user']
        path = os.path.join(mount_point, path_template[0], path_template[1](user_name))

        cache = FileCache(path)
        (timestamp, quota) = cache.load('quota')

        if now - timestamp > opts.options.threshold:
            print "%s: WARNING: no recent quota information (age of data is %d minutes)" % (storage_name, (now - timestamp) / 60)
        else:
            for (fileset, qi) in quota.quota_map.items():
                # print the per-fileset quota information that is available here
                print "%s: quota in fileset %s: %s" % (storage_name, fileset, qi)


if __name__ == '__main__':
    main()
Example 3: print_vo_quota
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def print_vo_quota(opts, storage, vos, now):
    """
    Print the quota for the VO fileset.
    """
    print "\nVO quota:"

    for storage_name in [s for s in opts.options.storage if s != 'VSC_HOME']:  # No VOs on VSC_HOME atm
        mount_point = storage[storage_name].login_mount_point
        path_template = storage.path_templates[storage_name]['vo']
        path = os.path.join(mount_point, path_template[0], path_template[1](vos[0]), ".quota_fileset.json.gz")

        cache = FileCache(path, True)
        try:
            (timestamp, quota) = cache.load('quota')
        except TypeError:
            logger.debug("Cannot load data from %s" % (path,))
            print "%s: WARNING: No VO quota information found" % (storage_name,)
            continue

        if now - timestamp > opts.options.threshold:
            print "%s: WARNING: no recent VO quota information (age of data is %d minutes)" % (storage_name, (now - timestamp) / 60)
        else:
            for (fileset, qi) in quota.quota_map.items():
                pp = quota_pretty_print(storage_name, fileset, qi, opts.options.fileset_prefixes)
                if pp:
                    print pp
Example 4: test_save_and_load
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def test_save_and_load(self):
    """Check if the loaded data is the same as the saved data."""
    # test with random data
    data, threshold = get_rand_data()

    # create a tempfile name (the file itself should not exist yet)
    (handle, filename) = tempfile.mkstemp()
    os.unlink(filename)
    os.close(handle)

    cache = FileCache(filename)
    for (key, value) in data.items():
        cache.update(key, value, threshold)
    cache.close()

    now = time.time()
    new_cache = FileCache(filename)
    for key in data.keys():
        info = new_cache.load(key)
        self.assertTrue(info is not None)
        (ts, value) = info
        self.assertTrue(value == data[key])
        self.assertTrue(ts <= now)
    new_cache.close()

    os.unlink(filename)
Example 5: read_timestamp
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def read_timestamp(filename):
    """Read the stored timestamp value from a pickled file.

    @returns: string representing a timestamp in the proper LDAP time format
    """
    cache = FileCache(filename)
    (_, timestamp) = cache.load('timestamp')
    return timestamp
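For context, the 'timestamp' entry that Example 5 reads back would have been written by a companion along the following lines. This is a sketch inferred from the read side, not the actual vsc-utils writer; the function name write_timestamp is only illustrative.

def write_timestamp(filename, timestamp):
    """Store a timestamp under the 'timestamp' key so read_timestamp() can retrieve it (illustrative sketch)."""
    cache = FileCache(filename)
    cache.update('timestamp', timestamp, 0)  # threshold 0: always overwrite the entry
    cache.close()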
Example 6: read_cache
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def read_cache(owner, showvo, running, idle, blocked, path):
    """
    Unpickle the file and fill in the resulting data structure.
    """
    try:
        cache = FileCache(path)
    except:
        print "Failed to load showq information from %s" % (path,)
        return (None, None)

    res = cache.load('showq')[1][0]
    user_map = cache.load('showq')[1][1]

    # check for timeinfo
    if res['timeinfo'] < (time.time() - MAXIMAL_AGE):
        print "The data in the showq cache may be outdated. Please contact your admin to look into this."
        # return (None, None)
    del res['timeinfo']

    logger.debug("Resulting cache data: %s" % (res))

    # Filter out data that is not needed
    if not showvo:
        for user in res.keys():
            if not user == owner:
                # del res[user]
                pass

    for user in res.keys():
        for host in res[user].keys():
            logger.debug("looking at host %s" % (host))
            states = res[user][host].keys()
            if not running:
                if 'Running' in states:
                    del res[user][host]['Running']
            if not idle:
                if 'Idle' in states:
                    del res[user][host]['Idle']
            if not blocked:
                for state in [x for x in states if not x in ('Running', 'Idle')]:
                    del res[user][host][state]

    return (res, user_map)
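Example 6 calls cache.load('showq') twice and indexes into the result; since the cached value under 'showq' is apparently a (res, user_map) pair wrapped in the usual (timestamp, data) tuple, the same lookup can be done with a single call. A small sketch, assuming that structure:

# one load() call instead of two; 'showq' maps to (timestamp, (res, user_map))
(_, (res, user_map)) = cache.load('showq')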
Example 7: test_wirte_donefile
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def test_wirte_donefile(self):
    """Test the writing of the values to a cache file when done"""
    donefile = "/tmp/done"
    values = {
        'completed': 50,
        'failed': 5,
        'unfinished': 0,
    }
    zkclient = RsyncSource('dummy', session='new', netcat=True, rsyncpath='/path/dummy', rsyncdepth=2, done_file=donefile)
    zkclient.write_donefile(values)

    cache_file = FileCache(donefile)
    (timestamp, stats) = cache_file.load('stats')
    self.assertEqual(values, stats)
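The test asserts that RsyncSource.write_donefile() stores its statistics under the 'stats' key of a FileCache at the done_file path. The writer presumably looks roughly like the sketch below; this is inferred from the assertions, not taken from the actual vsc-zk implementation, and the self.done_file attribute name is an assumption.

def write_donefile(self, values):
    """Write the run statistics to the done file as a FileCache entry (illustrative sketch)."""
    cache = FileCache(self.done_file)  # assumed attribute holding the done_file path
    cache.update('stats', values, 0)   # the key the test loads back
    cache.close()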
Example 8: read_cache
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def read_cache(path):
    """
    Unpickle the file and fill in the resulting data structure.
    """
    try:
        cache = FileCache(path)
    except Exception:
        print "Failed to load checkjob information from %s" % (path,)
        return None

    res = cache.load('checkjob')
    if res[0] < (time.time() - MAXIMAL_AGE):
        print "The data in the checkjob cache may be outdated. Please contact your admin to look into this."

    return res[1]  # CheckjobInfo
Example 9: read_timestamp
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def read_timestamp(filename):
    """Read the stored timestamp value from a pickled file.

    @returns: string representing a timestamp in the proper LDAP time format
    """
    cache = FileCache(filename)
    (_, timestamp) = cache.load(0)

    if timestamp is not None and timestamp.tzinfo is None:
        # add local timezone info
        timestamp = timestamp.replace(tzinfo=Local)

    return timestamp
Example 10: test_contents
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def test_contents(self, data, threshold):
    """Check that the contents of the cache are as expected prior to closing it."""
    # create a tempfile name (the file itself should not exist yet)
    (handle, filename) = tempfile.mkstemp(dir='/tmp')
    os.unlink(filename)
    os.close(handle)

    cache = FileCache(filename)
    for (key, value) in data.items():
        cache.update(key, value, threshold)

    now = time.time()
    for key in data.keys():
        info = cache.load(key)
        self.assertFalse(info is None)
        (ts, value) = info
        self.assertTrue(value == data[key])
        self.assertTrue(ts <= now)
Example 11: _load_pickle_cluster_file
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def _load_pickle_cluster_file(self, host, raw=True):
    """Load the data from the pickled files.

    @type host: string
    @param host: cluster for which we load data

    @returns: representation of the showq output.
    """
    source = os.path.join(self._cache_pickle_directory(), self._cache_pickle_name(host))

    if raw:
        f = open(source, 'r')
        output = cPickle.load(f)
        f.close()
        return output
    else:
        cache = FileCache(source)
        return cache.load(self.cache_key)
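Note the asymmetry between the two branches: with raw=True the caller gets the unpickled object directly, while with raw=False it gets the (timestamp, data) tuple returned by FileCache.load and still has to unpack it. A hypothetical call site (the instance, host name, and variable names are illustrative):

loaded = monitor._load_pickle_cluster_file('cluster1', raw=False)  # hypothetical instance and host
if loaded is not None:
    (timestamp, showq_output) = loaded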
Example 12: report_and_exit
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def report_and_exit(self):
    """Unpickles the cache file, prints the data and exits accordingly.

    If the cache data is too old (now - cache timestamp > self.threshold), a critical exit is produced.
    """
    try:
        nagios_cache = FileCache(self.filename, True)
    except:
        self.log.critical("Error opening file %s for reading" % (self.filename))
        unknown_exit("%s nagios pickled file unavailable (%s)" % (self.header, self.filename))

    (timestamp, ((nagios_exit_code, nagios_exit_string), nagios_message)) = nagios_cache.load(0)
    nagios_cache.close()

    if self.threshold < 0 or time.time() - timestamp < self.threshold:
        self.log.info("Nagios check cache file %s contents delivered: %s" % (self.filename, nagios_message))
        print "%s %s" % (nagios_exit_string, nagios_message)
        sys.exit(nagios_exit_code)
    else:
        unknown_exit("%s pickled file too old (timestamp = %s)" % (self.header, time.ctime(timestamp)))
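The load(0) call unpacks a (timestamp, ((exit_code, exit_string), message)) structure, so the producer side of this reporter presumably stores the check result under key 0 roughly as below. This is a sketch inferred from the unpacking above, not the actual vsc-utils nagios code; the function and argument names are illustrative.

def cache_nagios_result(filename, nagios_exit_code, nagios_exit_string, nagios_message):
    """Store a nagios check result so report_and_exit() can replay it later (illustrative sketch)."""
    cache = FileCache(filename)
    cache.update(0, ((nagios_exit_code, nagios_exit_string), nagios_message), 0)  # key 0 matches load(0) in the reader
    cache.close()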
Example 13: process_hold
# required import: from vsc.utils.cache import FileCache [as alias]
# or alternatively: from vsc.utils.cache.FileCache import load [as alias]
def process_hold(clusters, dry_run=False):
    """Process a filtered queueinfo dict"""
    releasejob_cache = FileCache(RELEASEJOB_CACHE_FILE)

    # get the showq data
    for data in clusters.values():
        data['path'] = data['spath']  # showq path
    showq = Showq(clusters, cache_pickle=True)
    (queue_information, _, _) = showq.get_moab_command_information()

    # release the jobs, prepare the command
    m = MoabCommand(cache_pickle=False, dry_run=dry_run)
    for data in clusters.values():
        data['path'] = data['mpath']  # mjobctl path
    m.clusters = clusters

    # read the previous data
    ts_data = releasejob_cache.load('queue_information')
    if ts_data is None:
        old_queue_information = {}
    else:
        (_, old_queue_information) = ts_data

    stats = {
        'peruser': 0,
        'total': 0,
        'release': 0,
    }

    release_jobids = []
    for user, clusterdata in queue_information.items():
        oldclusterdata = old_queue_information.setdefault(user, {})
        totaluser = 0
        for cluster, data in clusterdata.items():
            olddata = oldclusterdata.setdefault(cluster, {})
            # DRMJID is supposed to be unique
            # get all old job ids in one dict
            oldjobs = dict([(j['DRMJID'], j['_release']) for jt in olddata.values() for j in jt])
            for jobtype, jobs in data.items():
                removeids = []
                for idx, job in enumerate(jobs):
                    jid = job['DRMJID']
                    if jobtype in RELEASEJOB_SUPPORTED_HOLDTYPES:
                        totaluser += 1
                        release = max(oldjobs.get(jid, 0), 0) + 1
                        job['_release'] = release
                        stats['release'] = max(stats['release'], release)
                        release_jobids.append(jid)
                        # release the job
                        cmd = [m.clusters[cluster]['path'], '-u', jid]
                        logger.info("Releasing job %s cluster %s for the %s-th time." % (jid, cluster, release))
                        if dry_run:
                            logger.info("Dry run %s" % cmd)
                        else:
                            m._run_moab_command(cmd, cluster, [])
                    else:
                        # keep historical data, e.g. a previously released job could be idle now,
                        # but keep the counter in case it gets held again
                        try:
                            release = oldjobs[jid]
                            job['_release'] = release
                        except KeyError:
                            # not previously in hold, remove it
                            removeids.append(idx)

                # remove the jobs (in reverse order)
                for remove_idx in removeids[::-1]:
                    jobs.pop(remove_idx)

                # cleanup
                if len(jobs) == 0:
                    data.pop(jobtype)

            # cleanup
            if len(data) == 0:
                clusterdata.pop(cluster)

        # cleanup
        if len(clusterdata) == 0:
            queue_information.pop(user)

        # update stats
        stats['peruser'] = max(stats['peruser'], totaluser)
        stats['total'] += totaluser

    logger.info("Release statistics: total jobs in hold %(total)s; max in hold per user %(peruser)s; max releases per job %(release)s" % stats)

    # update and close
    releasejob_cache.update('queue_information', queue_information, 0)
    releasejob_cache.close()

    return release_jobids, stats