This article collects typical usage examples of the Python method WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil.getCompletedWorkflows. If you are wondering how to use DBSBufferUtil.getCompletedWorkflows in practice, the curated code examples below may help. You can also explore further usage of the class the method belongs to, WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil.
Two code examples of DBSBufferUtil.getCompletedWorkflows are shown below, sorted by popularity by default.
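Before diving into the examples, here is a minimal usage sketch. It is illustrative only: it assumes a WMAgent-style database connection has already been initialized (DBSBufferUtil relies on WMCore's thread-local DAO machinery), and the print loop is purely for demonstration.

# Minimal usage sketch (assumes an initialized WMAgent database connection)
from WMComponent.DBS3Buffer.DBSBufferUtil import DBSBufferUtil

dbsUtil = DBSBufferUtil()
# Returns the names of dbsbuffer workflows marked "completed"
completedWorkflows = dbsUtil.getCompletedWorkflows()
for workflowName in completedWorkflows:
    print(workflowName)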
Example 1: DBSBufferUtilTest
# Required import: from WMComponent.DBS3Buffer.DBSBufferUtil import DBSBufferUtil [as alias]
# Or: from WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil import getCompletedWorkflows [as alias]
# ......... (some of the surrounding code has been omitted here) .........
        self.wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                      logger=myThread.logger,
                                      dbinterface=myThread.dbi)
        self.dbsUtil = DBSBufferUtil()

        # Create two test dbsbuffer workflows
        insertWorkflow = self.dbsbufferFactory(classname="InsertWorkflow")
        insertWorkflow.execute("Test1", "Task1", 0, 0, 0, 0)
        insertWorkflow.execute("Test2", "Task2", 0, 0, 0, 0)

        # Update one workflow to "completed" state
        updateWorkflow = self.dbsbufferFactory(classname="UpdateWorkflowsToCompleted")
        updateWorkflow.execute(["Test1"])

        # Create a test wmbs workflow
        testWorkflow = Workflow(spec="somespec.xml", owner="Erik", name="Test1", task="Task1")
        testWorkflow.create()

        # Create a test dbsbuffer file
        self.createTestFiles()
    def createTestFiles(self):
        """
        _createTestFiles_

        Create some dbsbuffer test files with different statuses
        :return:
        """
        phedexStatus = self.dbsbufferFactory(classname="DBSBufferFiles.SetPhEDExStatus")

        for i in range(0, 4):
            lfn = "/path/to/some/lfn" + str(i)

            # Two files should be InDBS, two files should be NOTUPLOADED
            if i in [0, 2]:
                status = 'InDBS'
            else:
                status = 'NOTUPLOADED'

            testDBSFile = DBSBufferFile(lfn=lfn, size=600000, events=60000, status=status, workflowId=1)
            testDBSFile.setAlgorithm(appName="cmsRun", appVer="UNKNOWN",
                                     appFam="RECO", psetHash="SOMEHASH" + str(i),
                                     configContent="SOMECONTENT")
            testDBSFile.setDatasetPath("/path/to/some/dataset")
            testDBSFile.create()

            # Create all four combinations of status (InDBS, NOTUPLOADED) and in_phedex (0, 1)
            if i in [0, 1]:
                phedexStatus.execute(lfn, 1)
    def tearDown(self):
        """
        _tearDown_

        Drop all the DBSBuffer tables.
        """
        self.testInit.clearDatabase()

    # List of methods to potentially test:
    # def loadDBSBufferFilesBulk(self, fileObjs):
    # def findUploadableDAS(self):
    # def testFindOpenBlocks(self):
    # def loadBlocksByDAS(self, das):
    # def loadBlocks(self, blocknames):
    # def findUploadableFilesByDAS(self, datasetpath):
    # def loadFilesByBlock(self, blockname):
    def testGetPhEDExDBSStatusForCompletedWorkflows(self):
        """
        _testGetPhEDExDBSStatusForCompletedWorkflows_
        :return:
        """
        results = self.dbsUtil.getPhEDExDBSStatusForCompletedWorkflows()
        self.assertEqual(results["Test1"]["InDBS"], 2, "ERROR: Number of files with InDBS status is incorrect.")
        self.assertEqual(results["Test1"]["InPhEDEx"], 2, "ERROR: Number of files with InPhEDEx status is incorrect.")
        self.assertEqual(results["Test1"]["NotInDBS"], 2, "ERROR: Number of files with NotInDBS status is incorrect.")
        self.assertEqual(results["Test1"]["NotInPhEDEx"], 2, "ERROR: Number of files with NotInPhEDEx status is incorrect.")
        return

    def testGetCompletedWorkflows(self):
        """
        _testGetCompletedWorkflows_
        :return:
        """
        results = self.dbsUtil.getCompletedWorkflows()
        self.assertEqual(len(results), 1, "ERROR: getCompletedWorkflows returned an incorrect number of completed workflows.")
        self.assertIn("Test1", results, "ERROR: getCompletedWorkflows returned an incorrect workflow.")
        return
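Judging from the assertions in this test, getPhEDExDBSStatusForCompletedWorkflows appears to return a per-workflow mapping of status counters. Below is a sketch of the shape those assertions imply; it is reconstructed from the test above, not from the method's documentation.

# Result shape implied by the assertions above (reconstructed, not authoritative):
results = {
    "Test1": {
        "InDBS": 2,        # files with status 'InDBS'
        "NotInDBS": 2,     # files with status 'NOTUPLOADED'
        "InPhEDEx": 2,     # files with in_phedex == 1
        "NotInPhEDEx": 2,  # files with in_phedex == 0
    },
}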
Example 2: DBSUploadPoller
# Required import: from WMComponent.DBS3Buffer.DBSBufferUtil import DBSBufferUtil [as alias]
# Or: from WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil import getCompletedWorkflows [as alias]
# ......... (some of the surrounding code has been omitted here) .........
                    # It should be accounted for somewhere
                    # Or loaded with the block
                    continue

                # Check if we can put files in this block
                if not self.isBlockOpen(newFile=newFile,
                                        block=currentBlock):
                    # Then we have to close the block and get a new one
                    currentBlock.setPendingAndCloseBlock()
                    readyBlocks.append(currentBlock)
                    currentBlock = self.getBlock(newFile=newFile,
                                                 location=location)
                    currentBlock.setAcquisitionEra(era=dspInfo['AcquisitionEra'])
                    currentBlock.setProcessingVer(procVer=dspInfo['ProcessingVer'])

                # Now deal with the file
                currentBlock.addFile(newFile, self.datasetType, self.primaryDatasetType)
                self.filesToUpdate.append({'filelfn': newFile['lfn'],
                                           'block': currentBlock.getName()})

            # Done with the location
            readyBlocks.append(currentBlock)

        for block in readyBlocks:
            self.blockCache[block.getName()] = block

        return
    def checkBlockCompletion(self):
        """
        _checkBlockCompletion_

        Mark Open blocks as Pending if they have timed out or their workflows have completed
        """
        completedWorkflows = self.dbsUtil.getCompletedWorkflows()

        for block in self.blockCache.values():
            if block.status == "Open":
                if (block.getTime() > block.getMaxBlockTime()) or any(
                        key in completedWorkflows for key in block.workflows):
                    block.setPendingAndCloseBlock()

        return
    def isBlockOpen(self, newFile, block, doTime=False):
        """
        _isBlockOpen_

        Check and see if a block is full.

        This will check on time, but that's disabled by default.
        The plan is to do a time check after we do everything else,
        so open blocks about to time out can still get more
        files put in them.
        """
        if block.getMaxBlockFiles() is None or block.getMaxBlockNumEvents() is None or \
                block.getMaxBlockSize() is None or block.getMaxBlockTime() is None:
            return True

        if block.status != 'Open':
            # Then somebody has dumped this already
            return False
        if block.getSize() + newFile['size'] > block.getMaxBlockSize():
            return False
        if block.getNumEvents() + newFile['events'] > block.getMaxBlockNumEvents():
            return False
        if block.getNFiles() >= block.getMaxBlockFiles():
            # Then we have to dump it because this file
            # will put it over the limit.
            return False
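The listing is truncated at this point. Going by the method's docstring, it presumably finishes with the optional time check and a default "still open" result; the following is a hedged reconstruction of the missing tail, not part of the original listing.

        # Hedged reconstruction (assumption, not from the original listing):
        # the optional time check described in the docstring, then the default
        # result indicating the block can still accept files.
        if doTime and block.getTime() > block.getMaxBlockTime():
            return False
        return True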