This page collects typical usage examples of the Python method WMCore.Database.CMSCouch.Database.loadView. If you are unsure what Database.loadView does, how to call it, or want to see it used in real code, the hand-picked examples below should help. You can also look further into the containing class, WMCore.Database.CMSCouch.Database, for more details.
The sections below show 15 code examples of Database.loadView, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
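Before working through the examples, here is a minimal sketch of the calling pattern they all share: build a Database object from a database name and a CouchDB URL, then call loadView with a design document name, a view name, an options dictionary, and (optionally) a list of keys. The URL and the job ID key below are placeholders chosen purely for illustration, not references to any real deployment.

from WMCore.Database.CMSCouch import Database

# Placeholder CouchDB URL and key; adjust to your own instance
couchDB = Database('wmagent_jobdump/fwjrs', 'http://localhost:5984')

# loadView(designDoc, viewName, options, keys) returns a dict whose 'rows' list
# holds one entry per matching view row ('key', 'value', 'id', and 'doc' when
# include_docs is set in the options)
result = couchDB.loadView('FWJRDump', 'outputByJobID', {'include_docs': True}, [1234])
for row in result['rows']:
    print row['id'], row['value']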
Example 1: getFileInformation
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def getFileInformation(workflow, lfn, outModule):
    # Connect to the FWJR DB
    fwjrDB = Database('wmagent_jobdump/fwjrs', 'http://dummy.cern.ch:5984')
    result = fwjrDB.loadView('FWJRDump', 'jobsByOutputLFN', {'include_docs' : True}, [[workflow, lfn]])
    if result['rows']:
        fwjrDoc = result['rows'][0]['doc']
        fwjrInfo = fwjrDoc['fwjr']
        for step in fwjrInfo['steps']:
            if step == 'cmsRun1':
                if outModule not in fwjrInfo['steps'][step]['output']:
                    print "WARNING: No output module %s in this job" % outModule
                    return
                outModuleInfo = fwjrInfo['steps'][step]['output'][outModule]
                for fileInfo in outModuleInfo:
                    if fileInfo['lfn'] == lfn:
                        print "File information, %s" % fileInfo['lfn']
                        print "Run/Lumis:"
                        for run in fileInfo['runs']:
                            print 'Run: %s, Lumi range: %s-%s' % (run, fileInfo['runs'][run][0], fileInfo['runs'][run][1])
                        print "Number of Events: %s" % fileInfo['events']
                        print "Filesize (bytes): %.1f" % (float(fileInfo['size']))
                        print "Adler32 Checksum: %s" % fileInfo['checksums']['adler32']
    else:
        print "WARNING: No file info in CouchDB"
    return
Example 2: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    config = loadConfigurationFile(os.environ['WMAGENT_CONFIG'])
    config.CoreDatabase.dialect = 'oracle'
    init = WMInit()
    init.setDatabaseConnection(config.CoreDatabase.connectUrl,
                               config.CoreDatabase.dialect)
    couchDB = Database('wmagent_jobdump/fwjrs', '')
    couchDB2 = Database('wmagent_jobdump/jobs', '')
    myThread = threading.currentThread()
    daofactory = DAOFactory(package = "WMCore.WMBS",
                            logger = logging,
                            dbinterface = myThread.dbi)
    getJobsDAO = daofactory(classname = "Jobs.GetAllJobs")
    completedJobs = getJobsDAO.execute(state = 'complete')
    candidates = []
    while len(completedJobs):
        candidates = []
        chunk = completedJobs[:500]
        completedJobs = completedJobs[500:]
        result = couchDB.loadView('FWJRDump', 'outputByJobID', keys = chunk)
        rows = result['rows']
        for entry in rows:
            candidates.append(entry['key'])
        for jobId in candidates:
            doc = couchDB2.document(str(jobId))
            last = max(map(int, doc['states'].keys()))
            lastState = doc['states'][str(last)]['newstate']
            if lastState == 'success':
                print jobId
Example 3: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    """
    _main_
    """
    usage = "Usage: python %prog -w workflow"
    parser = OptionParser(usage = usage)
    parser.add_option('-w', '--workflow', help = 'Workflow name in ReqMgr', dest = 'wf')
    (options, args) = parser.parse_args()
    if not options.wf:
        parser.error('You must provide a workflow name')
        sys.exit(1)
    couchUrl = "https://cmsweb.cern.ch/couchdb"
    database = "acdcserver"
    failures = {}
    svc = Database(database, couchUrl)
    result = svc.loadView("ACDC", "byCollectionName", {'key' : options.wf, 'include_docs' : True, 'reduce' : False})
    print "Found %i failures/rows in total." % len(result["rows"])
    for entry in result["rows"]:
        if entry['doc']['fileset_name'] in failures:
            failures[entry['doc']['fileset_name']] += 1
        else:
            failures[entry['doc']['fileset_name']] = 1
    pprint(failures)
    print "\nDone!"
Example 4: checkWorkQueue
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def checkWorkQueue(requestName):
    result = {'ActiveAgents' : {},
              'ElementsRunning' : 0,
              'ElementsAcquired' : 0,
              'ElementsAvailable' : 0,
              'ElementsDone' : 0}
    x = Database('workqueue', 'https://cmsweb.cern.ch/couchdb')
    y = x.loadView('WorkQueue', 'elementsByParent', {'include_docs' : True}, [requestName])
    for entry in y['rows']:
        doc = entry['doc']
        element = doc['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']
        status = element['Status']
        if status == 'Running':
            result['ElementsRunning'] += 1
        elif status == 'Acquired':
            result['ElementsAcquired'] += 1
        elif status == 'Available':
            result['ElementsAvailable'] += 1
        elif status == 'Done':
            result['ElementsDone'] += 1
        if status not in ['Done', 'Available']:
            agent = element['ChildQueueUrl']
            if agent not in result['ActiveAgents']:
                result['ActiveAgents'][agent] = 0
            result['ActiveAgents'][agent] += 1
    return result
Example 5: checkForMissingFiles
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def checkForMissingFiles(options):
    # Initialize stuff
    phedexAPI = PhEDEx({'cachepath' : options.cachepath})
    acdcCouch = Database('wmagent_acdc', options.acdcUrl)
    # Let's get the IDs of the ACDC documents for the task/request/group/user
    array = [options.group, options.user, options.request, options.task]
    result = acdcCouch.loadView('ACDC', 'owner_coll_fileset_docs', {'reduce' : False}, [array])
    documentsIDs = [x['id'] for x in result['rows']]
    badFiles = {}
    # Go through the documents
    for docID in documentsIDs:
        doc = acdcCouch.document(docID)
        # Are we going to change this doc? Better back it up
        if options.change:
            backupFile = open(os.path.join(options.backup, "%s.bkp" % doc["_id"]), 'w')
            json.dump(doc, backupFile)
            backupFile.close()
        # Go through the files
        files = doc["files"]
        for inputFile in files:
            # Use the PhEDEx API to get the site based on the SE
            se = files[inputFile]["locations"][0]
            siteLocation = phedexAPI.getBestNodeName(se)
            # Now get the PFN
            pfnDict = phedexAPI.getPFN(siteLocation, inputFile)
            inputPfn = pfnDict[(siteLocation, inputFile)]
            # Run lcg-ls commands and see what we get
            command = 'lcg-ls -b -D srmv2 --srm-timeout 60 %s' % inputPfn
            commandList = shlex.split(command)
            try:
                (stdout, stderr, exitCode) = runCommand(commandList, False, 70)
            except Exception, ex:
                exitCode = 99999
                stdout = ''
                stderr = str(ex)
            if exitCode:
                # Something went wrong with the command
                # Mark the file as bad
                if docID not in badFiles:
                    badFiles[docID] = []
                badFiles[docID].append(inputFile)
                print 'File %s is thought to be bad' % inputFile
                print 'Command was %s' % command
                print 'Return code was %i' % exitCode
                print 'Stdout was %s' % stdout
                print 'Stderr was %s' % stderr
Example 6: findParentJobs
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def findParentJobs(jobId):
    # Connect to the Job and FWJR DBs
    jobDB = Database('wmagent_jobdump/jobs', 'http://dummy.cern.ch:5984')
    fwjrDB = Database('wmagent_jobdump/fwjrs', 'http://dummy.cern.ch:5984')
    # Get the document of the child job
    childJobDoc = jobDB.document(id = jobId)
    # Get the workflow and input files, transform them into suitable keys [workflow, lfn]
    workflow = childJobDoc['workflow']
    inputLfns = [x['lfn'] for x in childJobDoc['inputfiles']]
    keys = [[workflow, x] for x in inputLfns]
    # Get the jobs that produced the input files for this job
    # Load the id and fwjr for these jobs since we have to re-run them
    result = fwjrDB.loadView('FWJRDump', 'jobsByOutputLFN', {}, keys)
    for entry in result['rows']:
        key = entry['key']
        jobId = entry['value']
        fwjrId = entry['id']
        result = fwjrDB.loadView('FWJRDump', 'logArchivesByJobID', {}, [[int(x) for x in fwjrId.split('-')]])
        logArch = result['rows'][0]['value']['lfn']
        # Check whether the logArch is in some LogCollect
        logCollectTarball = ''
        result = jobDB.loadView('JobDump', 'jobsByInputLFN', {}, [[workflow, logArch]])
        if result['rows']:
            logCollectJobId = result['rows'][0]['id']
            result = fwjrDB.loadView('FWJRDump', 'outputByJobID', {}, [int(logCollectJobId)])
            if result['rows']:
                logCollectTarball = result['rows'][0]['value']['lfn']
            else:
                print "WARNING: The logArchive for job %s was in a LogCollect job but no tarball was produced" % jobId
        # Print out the information
        print "Job %s produced %s, the logArch for it is %s in %s" % (jobId, key[1], logArch, logCollectTarball)
    return
Example 7: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    x = Database('workqueue', 'http://vocms201.cern.ch:5984')
    y = x.loadView('WorkQueue', 'availableByPriority', {'include_docs' : True})
    loadDistribution = {}
    for entry in y['rows']:
        doc = entry['doc']
        element = doc['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']
        key = frozenset(element['SiteWhitelist'])
        if key not in loadDistribution:
            loadDistribution[key] = 0
        loadDistribution[key] += element['Jobs']
    for site, jobs in loadDistribution.items():
        print "Site list %s has %d jobs" % (str(site), jobs)
Example 8: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    if len(sys.argv) != 2:
        print "Usage:"
        print "python CheckWorkQueueElements.py <workflowName>"
        sys.exit(0)
    workflow = sys.argv[1]
    x = Database('workqueue', 'https://cmsweb.cern.ch/couchdb')
    y = x.loadView('WorkQueue', 'elementsByParent', {'include_docs' : True}, [workflow])
    for entry in y['rows']:
        doc = entry['doc']
        element = doc['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']
        if element['Status'] != 'Done':
            print 'Element: %s is %s in %s' % (doc['_id'], element['Status'], element['ChildQueueUrl'])
Example 9: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    if "WMAGENT_CONFIG" not in os.environ:
        os.environ["WMAGENT_CONFIG"] = '/data/srv/wmagent/current/config/wmagent/config.py'
    myThread = threading.currentThread()
    connectToDB()
    formatter = DBFormatter(logging, myThread.dbi)
    limboFiles = formatter.formatDict(myThread.dbi.processData("""SELECT dbsbuffer_workflow.name, dbsbuffer_file.lfn
                                                                  FROM dbsbuffer_file
                                                                  INNER JOIN dbsbuffer_workflow ON
                                                                      dbsbuffer_file.workflow = dbsbuffer_workflow.id
                                                                  LEFT OUTER JOIN dbsbuffer_block ON
                                                                      dbsbuffer_file.block_id = dbsbuffer_block.id
                                                                  WHERE dbsbuffer_file.status = 'READY' AND
                                                                        dbsbuffer_block.id is NULL"""))
    if not limboFiles:
        print "There are no bad files to fix"
        return
    for entry in limboFiles:
        data = Database('wmagent_jobdump/fwjrs', 'http://%s:5984' % socket.gethostname())
        result = data.loadView('FWJRDump', 'jobsByOutputLFN', {'include_docs' : True},
                               [[entry['name'], entry['lfn']]])['rows']
        if result:
            result = result[0]
            fwjr = result['doc']['fwjr']
            for step in fwjr['steps']:
                if step == 'cmsRun1':
                    stepInfo = fwjr['steps'][step]
                    site = stepInfo['site']
                    break
        else:
            print "Could not find location for %s" % entry['lfn']
            continue
        se = myThread.dbi.processData("""SELECT wmbs_location_senames.se_name FROM
                                             wmbs_location_senames
                                         INNER JOIN wmbs_location ON
                                             wmbs_location.id = wmbs_location_senames.location
                                         WHERE wmbs_location.site_name = '%s'""" % site)
        se = formatter.formatDict(se)[0]
        insertQuery = """INSERT INTO dbsbuffer_location (se_name)
                         SELECT '%s' AS se_name FROM DUAL WHERE NOT EXISTS
                             (SELECT se_name FROM dbsbuffer_location WHERE se_name = '%s')""" % (se['se_name'], se['se_name'])
        myThread.dbi.processData(insertQuery)
        updateQuery = """INSERT INTO dbsbuffer_file_location (filename, location)
                         SELECT df.id, dl.id
                         FROM dbsbuffer_file df, dbsbuffer_location dl
                         WHERE df.lfn = '%s'
                         AND dl.se_name = '%s'""" % (entry['lfn'], se['se_name'])
        myThread.dbi.processData(updateQuery)
        updateQuery = """UPDATE dbsbuffer_file SET status = 'NOTUPLOADED' WHERE lfn = '%s'""" % entry['lfn']
        myThread.dbi.processData(updateQuery)
Example 10: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    db = Database('wmagent_jobdump/fwjrs', 'http://vocms237.cern.ch:5984')
    results = db.loadView('FWJRDump', 'fwjrsByWorkflowName', {'startkey': ['pdmvserv_TOP-Summer12pLHE-00001_3_v0_STEP0ATCERN_130728_164313_3585'],
                                                              'endkey' : ['pdmvserv_TOP-Summer12pLHE-00001_3_v0_STEP0ATCERN_130728_164313_3585', {}],
                                                              'include_docs' : True})
    globalJobTime = 0.0
    globalEvents = 0.0
    globalCPUTime = 0.0
    globalCPUEventTime = 0.0
    count = 0
    rows = results['rows']
    for entry in rows:
        doc = entry['doc']
        fwjr = doc['fwjr']
        task = fwjr['task']
        if task == '/pdmvserv_TOP-Summer12pLHE-00001_3_v0_STEP0ATCERN_130728_164313_3585/Production':
            steps = fwjr['steps']
            breakLoop = False
            cmsRunStep = None
            for step in steps:
                if steps[step]['status'] != 0 and step != 'logArch1':
                    breakLoop = True
                    break
                if step == 'cmsRun1':
                    cmsRunStep = steps[step]
            if breakLoop:
                continue
            count += 1
            performance = cmsRunStep['performance']
            totalJobTime = float(performance['cpu']['TotalJobTime'])
            globalJobTime += totalJobTime
            cpuTime = float(performance['cpu']['TotalJobCPU'])
            globalCPUTime += cpuTime
            cpuEventTime = float(performance['cpu']['TotalEventCPU'])
            globalCPUEventTime += cpuEventTime
            events = 10000
            globalEvents += events
    timePerJob = globalJobTime/count
    if timePerJob > 3600:
        timePerJob = timePerJob/3600.0
        print 'Average job duration: %.2f hours' % timePerJob
    else:
        print 'Average job duration: %.0f seconds' % timePerJob
    print 'Job time per event: %.2f seconds' % (globalJobTime/globalEvents)
    print 'Average job CPU time: %.0f seconds' % (globalCPUTime/count)
    print 'Average event CPU time: %.8f seconds' % (globalCPUEventTime/globalEvents)
    print 'Events processed: %d' % globalEvents
    print 'Jobs processed: %d' % count
Example 11: getLogArchForJob
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def getLogArchForJob(jobId, workflow):
    # Connect to the Job and FWJR DBs
    jobDB = Database('wmagent_jobdump/jobs', 'http://dummy.cern.ch:5984')
    fwjrDB = Database('wmagent_jobdump/fwjrs', 'http://dummy.cern.ch:5984')
    # Get the logArchives for the job
    result = fwjrDB.loadView('FWJRDump', 'logArchivesByJobID', {'startkey' : [int(jobId)], 'endkey' : [int(jobId), {}]})
    lastLogArch = sorted(result['rows'], key = lambda x: x['value']['retrycount'])[-1]['value']['lfn']
    # Get the logCollect job for the logArch, if any
    logCollectTarball = ''
    result = jobDB.loadView('JobDump', 'jobsByInputLFN', {}, [[workflow, lastLogArch]])
    if result['rows']:
        logCollectJobId = result['rows'][0]['id']
        result = fwjrDB.loadView('FWJRDump', 'outputByJobID', {}, [int(logCollectJobId)])
        if result['rows']:
            logCollectTarball = result['rows'][0]['value']['lfn']
        else:
            print "WARNING: The logArchive for job %s was in a LogCollect job but no tarball was produced" % jobId
    # Print out the information
    print "The logArch for job %s is %s in %s" % (jobId, lastLogArch, logCollectTarball)
    return
Example 12: retrieveResubmissionChildren
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def retrieveResubmissionChildren(requestName, couchUrl, couchDBName):
    """
    _retrieveResubmissionChildren_
    Construct a list of request names which are the resubmission
    offspring of a request. This is a recursive
    call with a single requestName as input.
    The result only includes the children and not the original request.
    """
    childrenRequestNames = []
    reqmgrDb = Database(couchDBName, couchUrl)
    result = reqmgrDb.loadView('ReqMgr', 'childresubmissionrequests', keys = [requestName])['rows']
    for child in result:
        childrenRequestNames.append(child['id'])
        childrenRequestNames.extend(retrieveResubmissionChildren(child['id'], couchUrl, couchDBName))
    return childrenRequestNames
Example 13: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    requestName = sys.argv[1]
    x = Database('workqueue', 'https://cmsweb.cern.ch/couchdb')
    y = x.loadView('WorkQueue', 'elementsByParent', {'include_docs' : True}, [requestName])
    runningElements = []
    for entry in y['rows']:
        doc = entry['doc']
        element = doc['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']
        if element['Status'] == 'Running':
            runningElements.append(doc)
    print "Found %d elements running, fix them?" % len(runningElements)
    inputData = raw_input("Type y/n: ")
    if inputData != "y":
        print "Aborting operation..."
        return 0
    for doc in runningElements:
        doc['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']['Status'] = 'Done'
        x.queue(doc)
    x.commit()
    print "Operation complete!"
    return 0
Example 14: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
def main():
    db = Database('wmagent_jobdump/jobs', 'http://vocms202.cern.ch:5984')
    results = db.loadView('JobDump', 'jobsByWorkflowName', {'startkey': ['pdmvserv_PixelRecover53_537p4_130116_130722_4919'],
                                                            'endkey' : ['pdmvserv_PixelRecover53_537p4_130116_130722_4919', {}],
                                                            'include_docs' : True})
    rows = results['rows']
    fileInfo = {}
    for entry in rows:
        doc = entry['doc']
        jobType = doc['jobType']
        if jobType != 'Processing':
            continue
        mask = doc['mask']
        inputFiles = doc['inputfiles']
        rAndl = mask['runAndLumis']
        for file in inputFiles:
            lfn = file['lfn']
            if lfn not in fileInfo:
                fileInfo[lfn] = {}
            for run in file['runs']:
                runNumber = str(run['run_number'])
                if runNumber not in rAndl:
                    continue
                lumis = run['lumis']
                for lumi in lumis:
                    if not lumiInMask(rAndl[runNumber], lumi):
                        continue
                    if runNumber not in fileInfo[lfn]:
                        fileInfo[lfn][runNumber] = {}
                    if lumi in fileInfo[lfn][runNumber]:
                        print "ALERT: Lumi %s from run %s is processed twice for file %s" % (lumi, runNumber, lfn)
                        fileInfo[lfn][runNumber][lumi].append(entry['id'])
                        print "Jobs processing it so far: %s" % str(fileInfo[lfn][runNumber][lumi])
                    else:
                        fileInfo[lfn][runNumber][lumi] = [entry['id']]
Example 15: DatabaseNotFoundException
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import loadView [as alias]
#......... part of the code omitted here .........
        self.logger.error('Could not delete document: %s . Reason: %s ' % (prepid, ex))
        return False

    def update(self, doc={}):
        if '_id' in doc:
            self.logger.log('Updating document "%s" in "%s"' % (doc['_id'], self.db_name))
        if self.__document_exists(doc):
            if self.cache:
                ##JR the revision in the cache is not the one in the DB at this point
                # will be retaken at next get
                self.__save_to_cache(doc['_id'], None)
            return self.save(doc)
        self.logger.error('Failed to update document: %s' % (json.dumps(doc)))
        return False

    def update_all(self, docs=[]):
        if not docs:
            return False
        for doc in docs:
            if self.__document_exists(doc):
                self.db.queue(doc)
        try:
            self.db.commit()
            return True
        except Exception as ex:
            self.logger.error('Could not commit changes to database. Reason: %s' % (ex))
            return False

    def get_all(self, page_num=-1):
        try:
            limit, skip = self.__pagify(page_num)
            if limit >= 0 and skip >= 0:
                result = self.db.loadView(self.db_name, "all", options={'limit': limit, 'skip': skip, 'include_docs': True})['rows']
                res = map(lambda r: r['doc'], result)
                return res
            result = self.db.loadView(self.db_name, "all", options={'include_docs': True})['rows']
            res = map(lambda r: r['doc'], result)
            return res
        except Exception as ex:
            self.logger.error('Could not access view. Reason: %s' % (ex))
            return []

    def query(self, query='', page_num=0):
        if not query:
            result = self.get_all(page_num)
            #res = map(lambda r : r['doc'], result)
            return result
        try:
            result = self.__query(query, page=page_num)
            #res = map(lambda r : r['doc'], result)
            return result
        except Exception as ex:
            self.logger.error('Could not load view for query: <%s> . Reason: %s' % (query, ex))
            return []

    def unique_res(self, query_result):
        docids = map(lambda doc: doc['_id'], query_result)
        docids_s = list(set(docids))
        if len(docids) != len(docids_s):
            docids_s = []
            return_dict = copy.deepcopy(query_result)
            for doc in query_result:
                if not doc['_id'] in docids_s:
                    docids_s.append(doc['_id'])