This article collects typical usage examples of the Python method WMCore.DataStructs.LumiList.LumiList.iteritems. If you are wondering what LumiList.iteritems does and how to use it, the curated examples below may help. You can also look further into the containing class, WMCore.DataStructs.LumiList.LumiList, for more context.
Two code examples of LumiList.iteritems are shown below.
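Before the full examples, a minimal sketch of the shared pattern may help (assuming a WMCore installation; the run and lumi numbers are made up for illustration). Note that iteritems is a Python 2 dict method; under Python 3 the equivalent call is items(). Both examples build a LumiList, compact it into a {run: [[firstLumi, lastLumi], ...]} dictionary, and then iterate that dictionary:

    from WMCore.DataStructs.LumiList import LumiList

    # Hypothetical input: (run, lumi) pairs, e.g. as produced by job splitting
    lumis = [(1, 1), (1, 2), (1, 3), (2, 7)]
    compact = LumiList(lumis=lumis).getCompactList()
    # compact is a plain dict mapping run numbers to inclusive lumi ranges,
    # e.g. {'1': [[1, 3]], '2': [[7, 7]]}
    for run, lumiRanges in compact.iteritems():  # .items() under Python 3
        for lumiRange in lumiRanges:
            print(run, lumiRange)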
Example 1: makeNewJobByWork
# Required import: from WMCore.DataStructs.LumiList import LumiList
# Method under discussion: LumiList.iteritems
def makeNewJobByWork(self, reason='', failedJob=False):
    """
    Make a new job given the passed in parameters.
    :param reason: why we are making a new job (for debugging only)
    :param failedJob: mark the new job as already failed
    :return: nothing
    """
    events = self.eventsInJob
    lumis = self.jobLumis
    files = self.jobFiles
    self.maxLumis = max(self.maxLumis, len(lumis))
    # Transform the lumi list into something compact and usable
    lumiList = LumiList(lumis=lumis).getCompactList()
    logging.debug("Because %s new job with events: %s, lumis: %s, and files: %s",
                  reason, events, lumiList, [f['lfn'] for f in files])
    if failedJob:
        logging.debug("  This job will be marked as failed")
        self.newJob(failedJob=failedJob, failedReason=reason)
    else:
        self.newJob()
    # Calculate and add performance information
    timePerEvent, sizePerEvent, memoryRequirement = self.getPerformanceParameters(self.perfParameters)
    self.currentJob.addResourceEstimates(jobTime=events * timePerEvent, disk=events * sizePerEvent,
                                         memory=memoryRequirement)
    # Add job mask information (iteritems is Python 2; use items() under Python 3)
    for run, lumiRanges in lumiList.iteritems():
        for lumiRange in lumiRanges:
            self.currentJob['mask'].addRunAndLumis(run=int(run), lumis=lumiRange)
    # Add files
    for f in files:
        self.currentJob.addFile(f)
    # Add pileup info if needed
    if self.deterministicPU:
        eventsToSkip = (self.nJobs - 1) * self.maxEvents * self.maxLumis
        logging.debug('Adding baggage to skip %s events', eventsToSkip)
        self.currentJob.addBaggageParameter("skipPileupEvents", eventsToSkip)
    return
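One detail of Example 1 worth unpacking is the deterministic pileup baggage: job N skips every event that the previous N - 1 jobs could possibly have read, assuming each job consumes at most maxEvents events in each of at most maxLumis lumis. A small standalone check of that arithmetic, with made-up values:

    # Hypothetical values: the 3rd job in a series where each job reads
    # at most 500 events per lumi across at most 4 lumis
    nJobs, maxEvents, maxLumis = 3, 500, 4
    eventsToSkip = (nJobs - 1) * maxEvents * maxLumis
    assert eventsToSkip == 4000  # jobs 1 and 2 together cover 2 * 500 * 4 events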
Example 2: report
# Required import: from WMCore.DataStructs.LumiList import LumiList
# Method under discussion: LumiList.iteritems
def report(self, workflow, userdn, usedbs):
    """
    Compute the report for a workflow. If usedbs is True, also query DBS and
    return information about the input and output datasets.
    """
    def _compactLumis(datasetInfo):
        """ Helper that converts the per-file runLumis (the result of
            listDatasetFileDetails) into a single aggregated dictionary.
        """
        lumilist = {}
        for dummyfile, info in datasetInfo.iteritems():
            for run, lumis in info['Lumis'].iteritems():
                lumilist.setdefault(str(run), []).extend(lumis)
        return lumilist

    res = {}
    self.logger.info("About to compute report of workflow: %s with usedbs=%s. Getting status first." % (workflow, usedbs))
    statusRes = self.status(workflow, userdn)[0]
    # Get the information we need from the task DB / initialize variables
    row = next(self.api.query(None, None, self.Task.ID_sql, taskname=workflow))
    row = self.Task.ID_tuple(*row)
    inputDataset = row.input_dataset
    outputDatasets = literal_eval(row.output_dataset.read() if row.output_dataset else 'None')
    dbsUrl = row.dbs_url
    # Load the lumi mask
    splitArgs = literal_eval(row.split_args.read())
    res['lumiMask'] = buildLumiMask(splitArgs['runs'], splitArgs['lumis'])
    self.logger.info("Lumi mask was: %s" % res['lumiMask'])
    # Extract the finished jobs from the file metadata
    jobids = [x[1] for x in statusRes['jobList'] if x[0] in ['finished']]
    rows = self.api.query(None, None, self.FileMetaData.GetFromTaskAndType_sql, filetype='EDM,TFILE,POOLIN', taskname=workflow)
    res['runsAndLumis'] = {}
    for row in rows:
        if row[GetFromTaskAndType.PANDAID] in jobids:
            if str(row[GetFromTaskAndType.PANDAID]) not in res['runsAndLumis']:
                res['runsAndLumis'][str(row[GetFromTaskAndType.PANDAID])] = []
            res['runsAndLumis'][str(row[GetFromTaskAndType.PANDAID])].append({'parents': row[GetFromTaskAndType.PARENTS].read(),
                                                                              'runlumi': row[GetFromTaskAndType.RUNLUMI].read(),
                                                                              'events': row[GetFromTaskAndType.INEVENTS],
                                                                              'type': row[GetFromTaskAndType.TYPE],
                                                                              'lfn': row[GetFromTaskAndType.LFN],
                                                                             })
    self.logger.info("Got %s edm files for workflow %s" % (len(res['runsAndLumis']), workflow))
    if usedbs:
        if not outputDatasets:
            raise ExecutionError("Cannot find any information about the output dataset names. You can try to execute 'crab report' with --dbs=no")
        try:
            # Load the input dataset's lumilist
            dbs = DBSReader(dbsUrl)
            inputDetails = dbs.listDatasetFileDetails(inputDataset)
            res['dbsInLumilist'] = _compactLumis(inputDetails)
            self.logger.info("Aggregated input lumilist: %s" % res['dbsInLumilist'])
            # Load the output datasets' lumilist
            res['dbsNumEvents'] = 0
            res['dbsNumFiles'] = 0
            res['dbsOutLumilist'] = {}
            dbs = DBSReader("https://cmsweb.cern.ch/dbs/prod/phys03/DBSReader")  # We can only publish here with DBS3
            outLumis = []
            for outputDataset in outputDatasets:
                outputDetails = dbs.listDatasetFileDetails(outputDataset)
                outLumis.append(_compactLumis(outputDetails))
                res['dbsNumEvents'] += sum(x['NumberOfEvents'] for x in outputDetails.values())
                res['dbsNumFiles'] += sum(len(x['Parents']) for x in outputDetails.values())
            outLumis = LumiList(runsAndLumis=outLumis).compactList
            # Expand each inclusive [first, last] range into the full list of lumis
            # (iteritems/map/reduce are Python 2 idioms)
            for run, lumis in outLumis.iteritems():
                res['dbsOutLumilist'][run] = reduce(lambda x1, x2: x1 + x2, map(lambda x: range(x[0], x[1] + 1), lumis))
            self.logger.info("Aggregated output lumilist: %s" % res['dbsOutLumilist'])
        except Exception as ex:
            msg = "Failed to contact DBS: %s" % str(ex)
            self.logger.exception(msg)
            raise ExecutionError("Exception while contacting DBS. Cannot get the input/output lumi lists. You can try to execute 'crab report' with --dbs=no")
    yield res
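The densest line in Example 2 is the one that turns the compact {run: [[first, last], ...]} mapping back into flat per-run lumi lists. A hypothetical standalone version of just that transformation, in the same Python 2 idiom as the example (under Python 3 you would need functools.reduce, items(), and list(range(...))):

    # Hypothetical compact mapping, shaped like LumiList(...).compactList
    compact = {'1': [[1, 3], [7, 8]], '2': [[5, 5]]}
    flat = {}
    for run, lumis in compact.iteritems():  # .items() under Python 3
        # Expand each inclusive [first, last] pair into the full list of lumi numbers
        flat[run] = reduce(lambda x1, x2: x1 + x2, map(lambda x: range(x[0], x[1] + 1), lumis))
    assert flat == {'1': [1, 2, 3, 7, 8], '2': [5]}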