This article collects typical usage examples of the Python method FWCore.PythonUtilities.LumiList.LumiList.getCompactList. If you are wondering exactly what LumiList.getCompactList does, or how to use it, the curated code examples below should help. You can also read further about the class the method belongs to, FWCore.PythonUtilities.LumiList.LumiList.
The following presents 7 code examples of LumiList.getCompactList, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
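Before the examples, it helps to know what getCompactList actually returns: it compacts a LumiList into a dict mapping run numbers (as strings) to lists of [first, last] luminosity-section ranges, the same structure used in CMS JSON lumi masks. A minimal sketch, with made-up run and lumi numbers:

from FWCore.PythonUtilities.LumiList import LumiList

# Build a LumiList from (run, lumi) pairs; the numbers are illustrative only.
lumiList = LumiList(lumis=[(193752, 1), (193752, 2), (193752, 3), (193752, 5), (193774, 1)])

# Consecutive lumis collapse into [first, last] ranges keyed by run (as a string),
# roughly: {'193752': [[1, 3], [5, 5]], '193774': [[1, 1]]}
print(lumiList.getCompactList())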
Example 1: mergeLumis
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
def mergeLumis(inputdata, lumimask):
"""
Computes the processed lumis, merges if needed and returns the compacted list.
"""
    mergedlumis = LumiList()
    doublelumis = LumiList()
    for report in inputdata:
        # Lumis already in the running merge are duplicates; accumulate them
        # (assigning the intersection directly would discard earlier duplicates)
        doublelumis = doublelumis | (mergedlumis & LumiList(runsAndLumis=report))
        mergedlumis = mergedlumis | LumiList(runsAndLumis=report)
    notprocessed = LumiList(compactList=lumimask) - mergedlumis
    return mergedlumis.getCompactList(), notprocessed.getCompactList(), doublelumis.getCompactList()
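A hedged usage sketch for this function: inputdata is assumed to be an iterable of per-job {run: [lumis]} dicts and lumimask a compact {run: [[first, last], ...]} dict; all names and numbers below are invented for illustration.

# Two hypothetical job reports that overlap on 193752:3.
reports = [
    {'193752': [1, 2, 3]},
    {'193752': [3, 4], '193774': [1]},
]
mask = {'193752': [[1, 10]], '193774': [[1, 5]]}

processed, missing, doubles = mergeLumis(reports, mask)
# processed -> compact list of everything covered by the reports
# missing   -> lumis in the mask that no report covered
# doubles   -> lumis that appeared in more than one report (here 193752:3)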
Example 2: mergeLumis
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
def mergeLumis(self, inputdata, lumimask):
    """
    Computes the processed lumis, merges if needed and returns the compacted list.
    """
    mergedlumis = LumiList()
    doublelumis = LumiList()
    for report in inputdata:
        # Lumis already in the running merge are duplicates; accumulate them
        doublelumis = doublelumis | (mergedlumis & LumiList(runsAndLumis=report))
        mergedlumis = mergedlumis | LumiList(runsAndLumis=report)
    if doublelumis:
        self.logger.info("Warning: double run-lumis processed %s" % doublelumis)
    return mergedlumis.getCompactList(), (LumiList(compactList=lumimask) - mergedlumis).getCompactList()
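Both variants lean on LumiList behaving like a set; a small sketch of the operator semantics these examples assume:

from FWCore.PythonUtilities.LumiList import LumiList

a = LumiList(runsAndLumis={'1': [1, 2, 3]})
b = LumiList(runsAndLumis={'1': [3, 4]})

print((a | b).getCompactList())  # union:        {'1': [[1, 4]]}
print((a & b).getCompactList())  # intersection: {'1': [[3, 3]]}
print((a - b).getCompactList())  # difference:   {'1': [[1, 2]]}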
Example 3: getInputRunLumi
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
def getInputRunLumi(self, file):
import xml.dom.minidom
dom = xml.dom.minidom.parse(file)
ll=[]
for elem in dom.getElementsByTagName("Job"):
nJob = int(elem.getAttribute("JobID"))
lumis = elem.getAttribute('Lumis')
#lumis = '193752:1'
#lumis = '193752:1-193752:5,193774:1-193774:5,193775:1'
        if lumis:
            tmp = str(lumis).split(",")
            #print "tmp = ", tmp
        else:
            msg = "The summary file inputLumiSummaryOfTask.json with the input runs and lumis was not created"
            common.logger.info(msg)
            return
#tmp = [193752:1-193752:5] [193774:1-193774:5]
for entry in tmp:
            run_lumi = entry.split("-")
            # run_lumi = [193752:1] [193752:5]
            if len(run_lumi) == 1:
                run, lumi = run_lumi[0].split(":")
                ll.append((run, int(lumi)))
            elif len(run_lumi) == 2:
                lumi_min = run_lumi[0].split(":")[1]
                run, lumi_max = run_lumi[1].split(":")
                for count in range(int(lumi_min), int(lumi_max) + 1):
                    ll.append((run, count))
    if len(ll):
        lumiList = LumiList(lumis=ll)
        compactList = lumiList.getCompactList()
        totalLumiFilename = self.fjrDirectory + 'inputLumiSummaryOfTask.json'
        totalLumiSummary = open(totalLumiFilename, 'w')
        json.dump(compactList, totalLumiSummary)
        totalLumiSummary.write('\n')
        totalLumiSummary.close()
        msg = "Summary file of the input runs and lumis to be analyzed with this task: %s\n" % totalLumiFilename
        common.logger.info(msg)
    else:
        msg = "The summary file inputLumiSummaryOfTask.json with the input runs and lumis was not created"
        common.logger.info(msg)
        return
    return totalLumiFilename
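The getCompactList/json.dump pattern used above produces a standard CMS JSON file that LumiList can read straight back. A minimal round-trip sketch with made-up numbers:

import json
from FWCore.PythonUtilities.LumiList import LumiList

compact = LumiList(lumis=[('193752', 1), ('193752', 2)]).getCompactList()
with open('inputLumiSummaryOfTask.json', 'w') as f:
    json.dump(compact, f)
# Reading the file back yields the same compact list.
print(LumiList(filename='inputLumiSummaryOfTask.json').getCompactList())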
Example 4: add_merged_sample
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
def add_merged_sample(NAME, type, AnaUrl, FWUrl, samples, comment):
    # samples is a list of dicts, each carrying three keys: 'process', 'dataset_id', 'sample_id'
dbstore = DbStore()
sample = None
# check that source dataset exist
# Skip: should exist, the check has been done before calling this function
# check that there is no existing entry
update = False
localpath = ''
nevents = 0
checkExisting = dbstore.find(Sample, Sample.name == unicode(NAME))
if checkExisting.is_empty():
sample = Sample(unicode(NAME), unicode(localpath), unicode(type), nevents)
else:
update = True
sample = checkExisting.one()
sample.removeFiles(dbstore)
# collecting contents
sample.nevents_processed = 0
sample.nevents = 0
sample.normalization = 1
sample.event_weight_sum = 0
extras_event_weight_sum = {}
dataset_nevents = 0
processed_lumi = LumiList()
for i, s in enumerate(samples):
if i == 0:
sample.source_dataset_id = s['dataset_id']
sample.source_sample_id = s['sample_id']
results = dbstore.find(Sample, Sample.sample_id == s['sample_id'])
# Should exist, the check has been done before calling this function
sample.nevents_processed += results[0].nevents_processed
sample.nevents += results[0].nevents
sample.event_weight_sum += results[0].event_weight_sum
extra_sumw = results[0].extras_event_weight_sum
if extra_sumw is not None:
extra_sumw = json.loads(extra_sumw)
for key in extra_sumw:
try:
extras_event_weight_sum[key] += extra_sumw[key]
except KeyError:
extras_event_weight_sum[key] = extra_sumw[key]
tmp_processed_lumi = results[0].processed_lumi
if tmp_processed_lumi is not None:
tmp_processed_lumi = json.loads( tmp_processed_lumi )
processed_lumi = processed_lumi | LumiList(compactList = tmp_processed_lumi)
# Get info from file table
results = dbstore.find(File, File.sample_id == s['sample_id'])
for lfn, pfn, event_weight_sum, file_extras_event_weight_sum, nevents in list(results.values(File.lfn, File.pfn, File.event_weight_sum, File.extras_event_weight_sum, File.nevents)):
f = File(lfn, pfn, event_weight_sum, file_extras_event_weight_sum, nevents)
sample.files.add(f)
# Get info from parent datasets
results = dbstore.find(Dataset, Dataset.dataset_id == s['dataset_id'])
dataset_nevents += results[0].nevents
if len(extras_event_weight_sum) > 0:
sample.extras_event_weight_sum = unicode(json.dumps(extras_event_weight_sum))
if len(processed_lumi.getCompactList()) > 0:
sample.processed_lumi = unicode(json.dumps(processed_lumi.getCompactList()))
sample.code_version = unicode(AnaUrl + ' ' + FWUrl) #NB: limited to 255 characters, but so far so good
if sample.nevents_processed != dataset_nevents:
sample.user_comment = unicode("Sample was not fully processed, only " + str(sample.nevents_processed) + "/" + str(dataset_nevents) + " events were processed. " + comment)
else:
sample.user_comment = unicode(comment)
sample.author = unicode(getpwuid(os.stat(os.getcwd()).st_uid).pw_name)
if not update:
dbstore.add(sample)
if sample.luminosity is None:
sample.luminosity = sample.getLuminosity()
        print(sample)
dbstore.commit()
return
else:
sample.luminosity = sample.getLuminosity()
print("Sample updated")
print(sample)
dbstore.commit()
return
# rollback
dbstore.rollback()
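The lumi bookkeeping in this example (rebuild each stored compact list with LumiList(compactList=...) and OR it into a running total) can be shown in isolation; the JSON strings below stand in for the database column and are invented:

import json
from FWCore.PythonUtilities.LumiList import LumiList

stored = ['{"1": [[1, 3]]}', '{"1": [[3, 5]], "2": [[1, 1]]}']  # hypothetical DB values
processed_lumi = LumiList()
for s in stored:
    processed_lumi = processed_lumi | LumiList(compactList=json.loads(s))
print(json.dumps(processed_lumi.getCompactList()))  # {"1": [[1, 5]], "2": [[1, 1]]}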
Example 5: popen
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
#.........some code omitted here.........
sys.exit(1)
import re, os, subprocess
from pprint import pprint
from collections import defaultdict
from FWCore.PythonUtilities.LumiList import LumiList
from RecoLuminosity.LumiDB import sessionManager, lumiCalcAPI, revisionDML
from JMTucker.Tools.general import from_pickle, to_pickle
os.system('mkdir -p prescales_temp')
def popen(cmd):
return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True).communicate()[0]
ll = LumiList('prescales_temp/Cert_190456-208686_8TeV_PromptReco_Collisions12_JSON.txt')
ll_compact = ll.getCompactList()
runs = [int(i) for i in ll.getRuns()]
runs.sort()
def dump_lumibyls(runs):
l = float(len(runs))
for i,run in enumerate(runs):
out_fn = 'prescales_temp/lumibyls/%i.csv' % run
already = os.path.isfile(out_fn)
print 'run %i (%i/%i)%s' % (run, i+1, l, ' (skipping since already dumped)' if already else '')
if already:
continue
popen('lumiCalc2.py lumibyls -r %i -o %s' % (run, out_fn))
def parse_lumibyls(run):
d = defaultdict(dict)
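The certification-file handling at the top of this example works standalone with any CMS JSON lumi mask; the file path below is the one the script assumes:

from FWCore.PythonUtilities.LumiList import LumiList

ll = LumiList('prescales_temp/Cert_190456-208686_8TeV_PromptReco_Collisions12_JSON.txt')
print(sorted(int(r) for r in ll.getRuns()))  # run numbers covered by the mask
print(len(ll.getCompactList()))              # one compact-list entry per run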
Example 6: run
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
def run(self):
"""
The main method of the class: report status of a task
"""
common.logger.debug( "Reporter::run() called")
task = common._db.getTask()
msg= "--------------------\n"
msg += "Dataset: %s\n"%str(task['dataset'])
if self.cfg_params.has_key('USER.copy_data') and int(self.cfg_params['USER.copy_data'])==1:
msg+= "Remote output :\n"
## TODO: SL should come from jobDB!
from PhEDExDatasvcInfo import PhEDExDatasvcInfo
stageout = PhEDExDatasvcInfo(self.cfg_params)
endpoint, lfn, SE, SE_PATH, user = stageout.getEndpoint()
#print endpoint, lfn, SE, SE_PATH, user
msg+= "SE: %s %s srmPath: %s\n"%(self.cfg_params['USER.storage_element'],SE,endpoint)
else:
msg += "Local output: %s\n" % task['outputDirectory']
#print task
possible_status = [ 'Created',
'Undefined',
'Submitting',
'Submitted',
'NotSubmitted',
'Waiting',
'Ready',
'Scheduled',
'Running',
'Done',
'Killing',
'Killed',
'Aborted',
'Unknown',
'Done (Failed)',
'Cleared',
'Retrieved'
]
eventsRead=0
eventsRequired=0
filesRead=0
filesRequired=0
lumis = []
for job in task.getJobs():
if (job.runningJob['applicationReturnCode']!=0 or job.runningJob['wrapperReturnCode']!=0): continue
# get FJR filename
fjr = self.fjrDirectory + job['outputFiles'][-1]
jobReport = readJobReport(fjr)
if len(jobReport) > 0:
inputFiles = jobReport[0].inputFiles
for inputFile in inputFiles:
# Accumulate the list of lum sections run over
for run in inputFile.runs.keys():
for lumi in inputFile.runs[run]:
lumis.append((run, lumi))
filesRead+=1
eventsRead+=int(inputFile['EventsRead'])
#print jobReport[0].inputFiles,'\n'
else:
pass
            #print 'no FJR available for job #%s'%job['jobId']
#print "--------------------------"
# Compact and write the list of successful lumis
lumiList = LumiList(lumis = lumis)
compactList = lumiList.getCompactList()
lumiFilename = task['outputDirectory'] + 'lumiSummary.json'
lumiSummary = open(lumiFilename, 'w')
json.dump(compactList, lumiSummary)
lumiSummary.write('\n')
lumiSummary.close()
msg += "Total Events read: %s\n" % eventsRead
msg += "Total Files read: %s\n" % filesRead
msg += "Total Jobs : %s\n" % len(task.getJobs())
msg += "Luminosity section summary file: %s\n" % lumiFilename
list_ID={}
# TEMPORARY by Fabio, to be removed
# avoid clashes between glite_slc5 and glite schedulers when a server is used
# otherwise, -report with a server requires a local scheduler
if self.cfg_params.get('CRAB.server_name', None) is None:
common.logger.debug( "Reporter updating task status")
task = common.scheduler.queryEverything(task['id'])
for st in possible_status:
list_ID = common._db.queryAttrRunJob({'statusScheduler':st},'jobId')
if (len(list_ID)>0):
msg+= " # Jobs: %s:%s\n"%(str(st),len(list_ID))
pass
msg+= "\n----------------------------\n"
common.logger.info(msg)
#.........some code omitted here.........
Example 7: LumiList
# Required import: from FWCore.PythonUtilities.LumiList import LumiList [as alias]
# Or: from FWCore.PythonUtilities.LumiList.LumiList import getCompactList [as alias]
for j in inputJSONFiles:
impLumis = impLumis | LumiList(filename=j)
if impLumis is not None:
if args.union:
edmLumis = edmLumis | impLumis
if args.subtract:
edmLumis = edmLumis - impLumis
if args.intersect:
edmLumis = edmLumis & impLumis
reclumiData=None
dellumiData=None
if lumiCalc is not None:
print "Accessing LumiDB... can take a while..."
dellumiData=lumiCalc.deliveredLumiForRange(edmLumis.getCompactList())
reclumiData=lumiCalc.recordedLumiForRange(edmLumis.getCompactList())
totalRec = 0.0
totalDel = 0.0
for dpr in dellumiData:
if dpr[2] != 'N/A':
totalDel += float(dpr[2])
for dpr in reclumiData:
totalRec += lumiCalc.calculateTotalRecorded(dpr[2])
print "Delivered Luminosity: ",totalDel
print "Recorded Luminosity: ",totalRec
if args.outputJSON:
    edmLumis.writeJSON(args.outputJSON)
if args.printJSON:
    #.........some code omitted here.........
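The script is truncated here, but both output paths rest on standard LumiList calls. A standalone sketch (an assumption for illustration, not the original script's tail):

from FWCore.PythonUtilities.LumiList import LumiList

edmLumis = LumiList(runsAndLumis={'193752': [1, 2, 3]})  # made-up content
edmLumis.writeJSON('edmLumis.json')   # the call behind args.outputJSON above
print(edmLumis.getCompactList())      # one plausible way to honor args.printJSON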