This article collects typical usage examples of the Python method grid_control.datasets.DataProvider.loadState. If you are unsure what DataProvider.loadState does, how to use it, or want to see concrete examples, the curated code samples below may help. You can also further explore usage examples of the containing class, grid_control.datasets.DataProvider.
Three code examples of DataProvider.loadState are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
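For orientation, here is a minimal sketch of the basic round trip, pieced together from the examples below: loadState restores a provider from a previously saved state file, and getBlocks then returns the block list. The file name 'dataset.dat' is a placeholder and the printed fields are only for illustration; example 3 additionally shows a two-argument form that passes a config object.

from grid_control.datasets import DataProvider

# Restore a provider from a state file previously written via provider.saveState(...)
provider = DataProvider.loadState('dataset.dat')
blocks = provider.getBlocks()  # list of block dictionaries
for block in blocks:
    print block[DataProvider.Dataset], block[DataProvider.NEntries]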
Example 1: resync
# Required import: from grid_control.datasets import DataProvider [as alias]
# Or: from grid_control.datasets.DataProvider import loadState [as alias]
# The method below also relies on the module-level imports os and time
def resync(self):
    (result_redo, result_disable, result_sizeChange) = ParameterSource.resync(self)
    if self.resyncEnabled() and self.dataProvider:
        # Get old and new dataset information
        old = DataProvider.loadState(self.getDataPath('cache.dat')).getBlocks()
        self.dataProvider.clearCache()
        new = self.dataProvider.getBlocks()
        self.dataProvider.saveState(self.getDataPath('cache-new.dat'))
        # Use old splitting information to synchronize with new dataset infos
        jobChanges = self.dataSplitter.resyncMapping(self.getDataPath('map-new.tar'), old, new)
        if jobChanges:
            # Move current splitting to backup and use the new splitting from now on
            def backupRename(old, cur, new):
                if self.keepOld:
                    os.rename(self.getDataPath(cur), self.getDataPath(old))
                os.rename(self.getDataPath(new), self.getDataPath(cur))
            backupRename('map-old-%d.tar' % time.time(), 'map.tar', 'map-new.tar')
            backupRename('cache-old-%d.dat' % time.time(), 'cache.dat', 'cache-new.dat')
            old_maxN = self.dataSplitter.getMaxJobs()
            self.dataSplitter.importState(self.getDataPath('map.tar'))
            self.maxN = self.dataSplitter.getMaxJobs()
            result_redo.update(jobChanges[0])
            result_disable.update(jobChanges[1])
            result_sizeChange = result_sizeChange or (old_maxN != self.maxN)
        self.resyncFinished()
    return (result_redo, result_disable, result_sizeChange)
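In this resync method, DataProvider.loadState restores the dataset state cached in 'cache.dat' so the old block list can be compared against the freshly queried one. If the resynchronization reports job changes, the current mapping and cache files are rotated to timestamped backups and the new splitting is imported in their place.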
Example 2: map
# Required import: from grid_control.datasets import DataProvider [as alias]
# Or: from grid_control.datasets.DataProvider import loadState [as alias]
# The fragment below also relies on sys, utils and script-level objects (jobDB, jobNum, selected, opts, args)
print "Resetting attempts", jobNum
jobinfo = jobDB.get(jobNum)
jobinfo.attempt = 0
jobinfo.history = {}
for key in jobinfo.dict.keys():
if key.startswith('history'):
jobinfo.dict.pop(key)
jobDB.commit(jobNum, jobinfo)
print str.join(' ', map(str, jobDB.getJobsIter(selected)))
if opts.diff:
if len(args) != 2:
utils.exitWithUsage("%s <dataset source 1> <dataset source 2>" % sys.argv[0])
utils.eprint = lambda *x: {}
a = DataProvider.loadState(args[0])
b = DataProvider.loadState(args[1])
(blocksAdded, blocksMissing, blocksChanged) = DataProvider.resyncSources(a.getBlocks(), b.getBlocks())
utils.printTabular([(DataProvider.Dataset, "Dataset"), (DataProvider.BlockName, "Block")], blocksMissing)
if opts.findrm:
removed = []
utils.eprint = lambda *x: {}
oldDP = DataProvider.loadState(args[0])
for new in args[1:]:
newDP = DataProvider.loadState(new)
(blocksAdded, blocksMissing, blocksChanged) = DataProvider.resyncSources(oldDP.getBlocks(), newDP.getBlocks())
for block in blocksMissing:
tmp = dict(block)
tmp[-1] = new
removed.append(tmp)
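This example shows the typical comparison workflow: dataset states saved earlier are reloaded with DataProvider.loadState and their block lists are diffed via DataProvider.resyncSources, which reports the added, missing and changed blocks. The findrm branch repeats the diff against a series of newer files to collect blocks that disappeared over time.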
Example 3: main
# Required import: from grid_control.datasets import DataProvider [as alias]
# Or: from grid_control.datasets.DataProvider import loadState [as alias]
# The function below also relies on os, utils, QM and the config filler classes
def main():
    dataset = args[0].strip()
    cfgSettings = {'dbs blacklist T1': 'False', 'remove empty blocks': 'False',
        'remove empty files': 'False', 'location format': opts.locationfmt,
        'nickname check collision': 'False'}
    if opts.metadata or opts.blockmetadata:
        cfgSettings['lumi filter'] = '-'
        cfgSettings['keep lumi metadata'] = 'True'

    # Build a dummy configuration to feed the data provider
    section = 'dataset'
    fillerList = [DefaultFilesConfigFiller()]
    if opts.settings:
        fillerList.append(FileConfigFiller([opts.settings]))
        tmpCfg = Config(fillerList, opts.settings)
        section = tmpCfg.get('global', ['task', 'module'])
    dummyConfig = Config(fillerList + [DictConfigFiller({section: cfgSettings})], opts.settings)
    dummyConfig.opts = opts
    dummyConfig = dummyConfig.addSections(['dataset'])

    # Restore a saved dataset state if the argument is a file, otherwise query a provider
    if os.path.exists(dataset):
        provider = DataProvider.loadState(dataset, dummyConfig)
    else:
        provider = DataProvider.create(dummyConfig, dataset, opts.provider)
    blocks = provider.getBlocks()
    if len(blocks) == 0:
        raise DatasetError('No blocks!')

    datasets = set(map(lambda x: x[DataProvider.Dataset], blocks))
    if len(datasets) > 1 or opts.info:
        headerbase = [(DataProvider.Dataset, 'Dataset')]
    else:
        print 'Dataset: %s' % blocks[0][DataProvider.Dataset]
        headerbase = []

    if opts.configentry:
        print
        print 'dataset ='
        infos = {}
        order = []
        maxnick = 5
        for block in blocks:
            dsName = block[DataProvider.Dataset]
            if not infos.get(dsName, None):
                order.append(dsName)
                infos[dsName] = dict([(DataProvider.Dataset, dsName)])
            if DataProvider.Nickname not in block and opts.confignick:
                try:
                    if '/' in dsName:
                        block[DataProvider.Nickname] = dsName.lstrip('/').split('/')[1]
                    else:
                        block[DataProvider.Nickname] = dsName
                except Exception:
                    pass
            if DataProvider.Nickname not in block and opts.confignick:
                # Fallback: 'np' is a nickname producer created elsewhere in the original script
                block[DataProvider.Nickname] = np.getName(None, dsName, block)
            if DataProvider.Nickname in block:
                nick = block[DataProvider.Nickname]
                infos[dsName][DataProvider.Nickname] = nick
                maxnick = max(maxnick, len(nick))
            if len(block[DataProvider.FileList]):
                infos[dsName][DataProvider.URL] = block[DataProvider.FileList][0][DataProvider.URL]
        for dsID, dsName in enumerate(order):
            info = infos[dsName]
            short = DataProvider.providers.get(provider.__class__.__name__, provider.__class__.__name__)
            # QM(cond, a, b) is grid-control's inline-if helper
            print '', info.get(DataProvider.Nickname, 'nick%d' % dsID).rjust(maxnick), ':', short, ':',
            print '%s%s' % (provider._datasetExpr, QM(short == 'list', ' %% %s' % info[DataProvider.Dataset], ''))

    if opts.listdatasets:
        # Add some enums for consistent access to info dicts
        DataProvider.NFiles = -1
        DataProvider.NBlocks = -2

        print
        infos = {}
        order = []
        infosum = {DataProvider.Dataset: 'Sum'}
        for block in blocks:
            dsName = block.get(DataProvider.Dataset, '')
            if not infos.get(dsName, None):
                order.append(dsName)
                infos[dsName] = {DataProvider.Dataset: block[DataProvider.Dataset]}
            def updateInfos(target):
                target[DataProvider.NBlocks] = target.get(DataProvider.NBlocks, 0) + 1
                target[DataProvider.NFiles] = target.get(DataProvider.NFiles, 0) + len(block[DataProvider.FileList])
                target[DataProvider.NEntries] = target.get(DataProvider.NEntries, 0) + block[DataProvider.NEntries]
            updateInfos(infos[dsName])
            updateInfos(infosum)
        head = [(DataProvider.Dataset, 'Dataset'), (DataProvider.NEntries, '#Events'),
            (DataProvider.NBlocks, '#Blocks'), (DataProvider.NFiles, '#Files')]
        utils.printTabular(head, map(lambda x: infos[x], order) + ["=", infosum])

    if opts.listblocks:
        print
        utils.printTabular(headerbase + [(DataProvider.BlockName, 'Block'), (DataProvider.NEntries, 'Events')], blocks)

    if opts.listfiles:
        print
        for block in blocks:
            #......... remainder of the code omitted .........
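Here DataProvider.loadState covers the file-based case: if the first argument is an existing file, the provider is restored from that saved state, this time with an explicit config object passed along; otherwise DataProvider.create instantiates a fresh provider from the dataset expression. The remainder of main only formats the resulting blocks for the various listing options.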