本文整理汇总了Python中WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter.updateRequestStatus方法的典型用法代码示例。如果您正苦于以下问题:Python RequestDBWriter.updateRequestStatus方法的具体用法?Python RequestDBWriter.updateRequestStatus怎么用?Python RequestDBWriter.updateRequestStatus使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter
的用法示例。
在下文中一共展示了RequestDBWriter.updateRequestStatus方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: moveToArchived
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
def moveToArchived(self, config):
    """
    Move fully drained aborted/rejected workflows to their archived states.

    For every workflow in 'aborted' or 'rejected' status that has no jobs
    left according to WMStats, walk the configured status chain (e.g.
    aborted -> aborted-completed -> aborted-archived) in the request DB.

    :param config: component configuration providing wmstats_url and
                   reqmgrdb_url
    """
    testbedWMStats = WMStatsReader(config.wmstats_url, reqdbURL=config.reqmgrdb_url)
    reqdbWriter = RequestDBWriter(config.reqmgrdb_url)
    # ordered transition chains: every intermediate state must be visited
    statusTransition = {"aborted": ["aborted-completed", "aborted-archived"],
                        "rejected": ["rejected-archived"]}
    for status, nextStatusList in statusTransition.items():
        requests = testbedWMStats.getRequestByStatus([status], jobInfoFlag=True, legacyFormat=True)
        self.logger.info("checking %s workflows: %d" % (status, len(requests)))
        if len(requests) > 0:
            requestCollection = RequestInfoCollection(requests)
            requestsDict = requestCollection.getData()
            numOfArchived = 0
            for requestName, requestInfo in requestsDict.items():
                # only archive once no jobs remain for the workflow
                if requestInfo.getJobSummary().getTotalJobs() == 0:
                    for nextStatus in nextStatusList:
                        reqdbWriter.updateRequestStatus(requestName, nextStatus)
                    numOfArchived += 1
            # fixed typo in the log message: "archieved" -> "archived"
            self.logger.info("Total %s-archived: %d" % (status, numOfArchived))
    return
示例2: T0RequestDBTest
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
class T0RequestDBTest(unittest.TestCase):
    """
    Unit tests for RequestDBWriter/RequestDBReader against the T0Request
    couchapp, exercising the Tier-0 request state machine.
    """
    def setUp(self):
        """
        _setUp_

        Create a fresh couch database with the T0Request couchapp and
        point writer/reader services at it.
        """
        self.schema = []
        self.couchApps = ["T0Request"]
        self.testInit = TestInitCouchApp('RequestDBServiceTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = self.schema,
                                useDefault = False)
        # NOTE(review): "requsetdb" looks like a typo for "requestdb", but it
        # is only a throwaway test database name, so it is left unchanged.
        dbName = 't0_requsetdb_t'
        self.testInit.setupCouch(dbName, *self.couchApps)
        reqDBURL = "%s/%s" % (self.testInit.couchUrl, dbName)
        self.requestWriter = RequestDBWriter(reqDBURL, self.couchApps[0])
        self.requestReader = RequestDBReader(reqDBURL, self.couchApps[0])
        # disable stale views so reads see writes immediately in the tests
        self.requestWriter.defaultStale = {}
        self.requestReader.defaultStale = {}
        return

    def tearDown(self):
        """
        _tearDown_
        Drop all the WMBS tables.
        """
        self.testInit.tearDownCouch()

    def testRequestDBWriter(self):
        # test getWork
        schema = generate_reqmgr_schema()
        result = self.requestWriter.insertGenericRequest(schema[0])
        self.assertEqual(len(result), 1, 'insert fail');
        # "assigned" is a ReqMgr state, not a Tier-0 one, so it is rejected
        result = self.requestWriter.updateRequestStatus(schema[0]['RequestName'], "assigned")
        self.assertEqual(result, 'not allowed state assigned', 'update fail')
        self.assertEqual(self.requestWriter.updateRequestStatus("not_exist_schema", "new"),
                         'Error: document not found')
        # walk the full Tier-0 state machine in its allowed order
        allowedStates = ["Closed", "Merge", "AlcaSkim", "Harvesting",
                         "Processing Done", "completed"]
        for state in allowedStates:
            self.assertEqual(self.requestWriter.updateRequestStatus(schema[0]['RequestName'], state),
                             'OK')
        # moving backwards in the state machine must be refused
        self.assertEqual(self.requestWriter.updateRequestStatus(schema[0]['RequestName'], "Processing Done"),
                         'not allowed transition completed to Processing Done')
        self.assertEqual(self.requestWriter.updateRequestStatus(schema[0]['RequestName'], "normal-archived"),
                         'OK')
        result = self.requestWriter.getRequestByStatus(["normal-archived"], False, 1)
        self.assertEqual(len(result), 1, "should be 1 but %s" % result)
示例3: Request
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
request_args['GracePeriod'] = int(request_args['GracePeriod'])
request_args['HardTimeout'] = request_args['SoftTimeout'] + request_args['GracePeriod']
#Only allow extra value update for assigned status
cherrypy.log("INFO: Assign request, input args: %s ..." % request_args)
try:
workload.updateArguments(request_args)
except Exception as ex:
msg = traceback.format_exc()
cherrypy.log("Error for request args %s: %s" % (request_args, msg))
raise InvalidSpecParameterValue(str(ex))
# validate/update OutputDatasets after ProcessingString and AcquisionEra is updated
request_args['OutputDatasets'] = workload.listOutputDatasets()
validateOutputDatasets(request_args['OutputDatasets'], workload.getDbsUrl())
# legacy update schema to support ops script
loadRequestSchema(workload, request_args)
report = self.reqmgr_db_service.updateRequestProperty(workload.name(), request_args, dn)
workload.saveCouch(self.config.couch_host, self.config.couch_reqmgr_db)
return report
def _handleCascadeUpdate(self, workload, request_args, dn):
    """
    Move a request and, optionally, its Resubmission children to a new status.

    Only the closed-out and announced transitions support this option.

    :param workload: workload object of the parent request
    :param request_args: dict containing "RequestStatus" and the "cascade" flag
    :param dn: distinguished name of the requesting user
    :returns: report from updating the parent request's status
    """
    req_status = request_args["RequestStatus"]
    # check whether the cascade option was requested; if so, propagate the
    # new status to every Resubmission child before touching the parent
    if request_args["cascade"]:
        cascade_list = self._retrieveResubmissionChildren(workload.name())
        for req_name in cascade_list:
            self.reqmgr_db_service.updateRequestStatus(req_name, req_status, dn)
    # update original workflow status
    report = self.reqmgr_db_service.updateRequestStatus(workload.name(), req_status, dn)
    return report
def _handleOnlyStateTransition(self, workload, req_status, dn):
    """
    Perform a pure state transition for a request.

    Aborting or force-completing a request needs special handling: the
    corresponding workqueue workflow is cancelled before the couch status
    is updated.

    :param workload: workload object of the request
    :param req_status: target status string
    :param dn: distinguished name of the requesting user
    :returns: report from the couch status update
    """
    needs_cancel = req_status in ("aborted", "force-complete")
    if needs_cancel:
        # cancel the workflow first
        self.gq_service.cancelWorkflow(workload.name())
    # update the request status in couchdb
    return self.reqmgr_db_service.updateRequestStatus(workload.name(), req_status, dn)
def _updateRequest(self, workload, request_args):
dn = cherrypy.request.user.get("dn", "unknown")
if workload is None:
(workload, request_args) = self.initialize_clone(request_args["OriginalRequestName"])
return self.post([workload, request_args])
if "RequestStatus" not in request_args:
report = self._handleNoStatusUpdate(workload, request_args)
else:
req_status = request_args["RequestStatus"]
# assignment-approved only allow Priority update
if len(request_args) == 2 and req_status == "assignment-approved":
report = self._handleAssignmentApprovedTransition(workload, request_args, dn)
elif len(request_args) > 1 and req_status == "assigned":
示例4: WMStatsTest
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
class WMStatsTest(unittest.TestCase):
    """
    Unit tests for RequestDBWriter and WMStatsReader against the WMStats
    and ReqMgr couchapps.
    """
    def setUp(self):
        """
        _setUp_

        Create WMStats and ReqMgr couch databases and the services that
        talk to them.
        """
        self.schema = []
        self.couchApps = ["WMStats"]
        self.testInit = TestInitCouchApp('WorkQueueServiceTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=self.schema,
                                useDefault=False)
        dbName = 'wmstats_t'
        self.testInit.setupCouch(dbName, "WMStats")
        reqDBName = "reqmgrdb_t"
        self.testInit.setupCouch(reqDBName, "ReqMgr")
        wmstatsURL = "%s/%s" % (self.testInit.couchUrl, dbName)
        reqDBURL = "%s/%s" % (self.testInit.couchUrl, reqDBName)
        self.reqDBWriter = RequestDBWriter(reqDBURL)
        self.wmstatsReader = WMStatsReader(wmstatsURL, reqdbURL=reqDBURL)
        # disable stale views so reads see writes immediately in the tests
        self.wmstatsReader.defaultStale = {}
        self.wmstatsReader.reqDB.defaultStale = {}
        return

    def tearDown(self):
        """
        _tearDown_
        Drop all the WMBS tables.
        """
        self.testInit.tearDownCouch()

    def testWMStatsWriter(self):
        """
        Exercise insert/update/read round trips through the request DB.

        Modernized: deprecated assertEquals replaced with assertEqual, and
        dict.keys() comparisons wrapped in list() so the assertions hold on
        Python 3 (where keys() is a view) as well as on Python 2.
        """
        schema = generate_reqmgr_schema()
        # insert and status updates; unknown documents must be reported
        result = self.reqDBWriter.insertGenericRequest(schema[0])
        self.assertEqual(result[0]['ok'], True, 'insert fail')
        result = self.reqDBWriter.updateRequestStatus(schema[0]['RequestName'], "failed")
        self.assertEqual(result, 'OK', 'update fail')
        result = self.reqDBWriter.updateRequestStatus("not_exist_schema", "assigned")
        self.assertEqual(result, 'Error: document not found')
        # property updates on existing and missing documents
        result = self.reqDBWriter.updateRequestProperty(schema[0]['RequestName'], {"Teams": ['teamA']})
        self.assertEqual(result, 'OK', 'update fail')
        result = self.reqDBWriter.updateRequestProperty("not_exist_schema", {"Teams": ['teamA']})
        self.assertEqual(result, 'Error: document not found')
        # stats updates are idempotent: repeating them must still succeed
        totalStats = {'TotalEstimatedJobs': 100, 'TotalInputEvents': 1000, 'TotalInputLumis': 1234, 'TotalInputFiles': 5}
        result = self.reqDBWriter.updateRequestProperty(schema[0]['RequestName'], totalStats)
        self.assertEqual(result, 'OK', 'update fail')
        result = self.reqDBWriter.updateRequestProperty(schema[0]['RequestName'], totalStats)
        self.assertEqual(result, 'OK', 'update fail')
        result = self.reqDBWriter.updateRequestProperty("not_exist_schema", totalStats)
        self.assertEqual(result, 'Error: document not found')
        # properties derived from a workload spec for an existing request
        spec1 = newWorkload(schema[0]['RequestName'])
        production = spec1.newTask("Production")
        production.setTaskType("Merge")
        production.setSiteWhitelist(['TEST_SITE'])
        properties = {"RequestPriority": spec1.priority(),
                      'SiteWhitelist': spec1.getTopLevelTask()[0].siteWhitelist(),
                      'OutputDatasets': spec1.listOutputDatasets()}
        result = self.reqDBWriter.updateRequestProperty(spec1.name(), properties)
        self.assertEqual(result, 'OK', 'update fail')
        # ... and for a request that does not exist in couch
        spec2 = newWorkload("not_exist_schema")
        production = spec2.newTask("Production")
        production.setTaskType("Merge")
        properties = {"RequestPriority": spec2.priority(),
                      'SiteWhitelist': spec2.getTopLevelTask()[0].siteWhitelist(),
                      'OutputDatasets': spec2.listOutputDatasets()}
        result = self.reqDBWriter.updateRequestProperty(spec2.name(), properties)
        self.assertEqual(result, 'Error: document not found')
        # reading the request back through the various WMStats views
        requests = self.wmstatsReader.getRequestByStatus(["failed"], jobInfoFlag=False, legacyFormat=True)
        self.assertEqual(list(requests.keys()), [schema[0]['RequestName']])
        requestCollection = RequestInfoCollection(requests)
        result = requestCollection.getJSONData()
        self.assertEqual(list(result.keys()), [schema[0]['RequestName']])
        requests = self.wmstatsReader.getActiveData()
        self.assertEqual(list(requests.keys()), [schema[0]['RequestName']])
        requests = self.wmstatsReader.getRequestByStatus(["failed"])
        self.assertEqual(list(requests.keys()), [schema[0]['RequestName']])
        requests = self.wmstatsReader.getRequestSummaryWithJobInfo(schema[0]['RequestName'])
        self.assertEqual(list(requests.keys()), [schema[0]['RequestName']])
示例5: TaskArchiverTest
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
instLuminosity = i*binSize
timePerEvent = points[i]
if instLuminosity > minLumi and instLuminosity < maxLumi :
worthPoints[instLuminosity] = timePerEvent
return worthPoints
def publishPerformanceDashBoard(self, dashBoardUrl, PD, release, worthPoints):
    """
    Build the dashboard performance payload and compare it against the
    reference JSON file shipped with the test.

    :param dashBoardUrl: dashboard endpoint (unused here; payload is only built)
    :param PD: primary dataset name
    :param release: CMSSW release string
    :param worthPoints: dict of {instantaneous luminosity: time per event}
    :returns: True when the payload matches the stored reference
    """
    dashboardPayload = [{"primaryDataset" : PD,
                         "release" : release,
                         "integratedLuminosity" : lumi,
                         "timePerEvent" : int(eventTime)}
                        for lumi, eventTime in worthPoints.items()]
    data = "{\"data\":%s}" % str(dashboardPayload).replace("\'","\"")
    # let's suppose it works.. compare against the stored reference payload
    referencePath = os.path.join(getTestBase(),
                                 'WMComponent_t/TaskArchiver_t/DashBoardPayload.json')
    with open(referencePath, 'r') as referenceFile:
        testDashBoardPayload = referenceFile.read()
    self.assertEqual(data, testDashBoardPayload)
    return True
def populateWorkflowWithCompleteStatus(self, name="TestWorkload"):
    """
    Insert a generic request named *name* and move it to 'completed'.

    :param name: request name to register in the request DB
    :returns: result of the status update call
    """
    reqSchema = generate_reqmgr_schema(1)[0]
    reqSchema["RequestName"] = name
    self.requestWriter.insertGenericRequest(reqSchema)
    return self.requestWriter.updateRequestStatus(name, "completed")
def testA_BasicFunctionTest(self):
"""
_BasicFunctionTest_
Tests the components, by seeing if they can process a simple set of closeouts
"""
myThread = threading.currentThread()
config = self.getConfig()
workloadPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
workload = self.createWorkload(workloadName = workloadPath)
testJobGroup = self.createTestJobGroup(config = config,
name = workload.name(),
specLocation = workloadPath,
error = False)
# Create second workload
testJobGroup2 = self.createTestJobGroup(config = config,
name = workload.name(),
filesetName = "TestFileset_2",
specLocation = workloadPath,
task = "/TestWorkload/ReReco/LogCollect",
type = "LogCollect")
cachePath = os.path.join(config.JobCreator.jobCacheDir,
"TestWorkload", "ReReco")
os.makedirs(cachePath)
self.assertTrue(os.path.exists(cachePath))
示例6: Tier0PluginTest
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
class Tier0PluginTest(unittest.TestCase):
def setUp(self):
    """
    _setUp_
    Setup the test environment

    Creates the WMBS schema, a couch database with the T0Request couchapp,
    a work directory, and the request DB writer used by the plugin tests.
    """
    self.testInit = TestInit(__file__)
    self.testInit.setDatabaseConnection()
    self.testInit.setSchema(["WMCore.WMBS"])
    self.requestCouchDB = 'wmstats_plugin_t'
    self.testInit.setupCouch(self.requestCouchDB, 'T0Request')
    self.testDir = self.testInit.generateWorkDir()
    reqDBURL = "%s/%s" % (os.environ['COUCHURL'], self.requestCouchDB)
    self.requestDBWriter = RequestDBWriter(reqDBURL, couchapp="T0Request")
    # avoid stale couch views so test writes are immediately visible
    self.requestDBWriter._setNoStale()
    # per-test fixtures filled in by the setup*Workflow helpers
    self.stateMap = {}
    self.orderedStates = []
    self.plugin = None
    return
def tearDown(self):
    """
    _tearDown_
    Clear databases and delete files

    Tears down couch, the WMBS schema, and the generated work directory.
    """
    self.testInit.tearDownCouch()
    self.testInit.clearDatabase()
    self.testInit.delWorkDir()
    return
def setupRepackWorkflow(self):
    """
    _setupRepackWorkflow_
    Populate WMBS with a repack-like workflow,
    every subscription must be unfinished at first

    Builds one top-level Repack subscription, one subscription per merge
    task, and one cleanup subscription, recording in self.stateMap which
    filesets must be closed to reach each Tier-0 state.
    """
    workflowName = 'Repack_Run481516_StreamZ'
    mergeTasks = ['RepackMergewrite_QuadElectron_RAW', 'RepackMergewrite_TriPhoton_RAW',
                  'RepackMergewrite_SingleNeutrino_RAW']
    self.stateMap = {'Merge': [],
                     'Processing Done': []}
    self.orderedStates = ['Merge', 'Processing Done']
    # Populate WMStats
    self.requestDBWriter.insertGenericRequest({'RequestName': workflowName})
    self.requestDBWriter.updateRequestStatus(workflowName, 'Closed')
    # Create a wmspec in disk
    workload = newWorkload(workflowName)
    repackTask = workload.newTask('Repack')
    for task in mergeTasks:
        repackTask.addTask(task)
    repackTask.addTask('RepackCleanupUnmergedwrite_QuadElectron_RAW')
    specPath = os.path.join(self.testDir, 'Repack.pkl')
    workload.save(specPath)
    # Populate WMBS
    topFileset = Fileset(name='TestStreamerFileset')
    topFileset.create()
    options = {'spec': specPath, 'owner': 'ItsAMeMario',
               'name': workflowName, 'wfType': 'tier0'}
    topLevelWorkflow = Workflow(task='/%s/Repack' % workflowName,
                                **options)
    topLevelWorkflow.create()
    topLevelSub = Subscription(topFileset, topLevelWorkflow)
    topLevelSub.create()
    # closing the top fileset drives the transition to 'Merge'
    self.stateMap['Merge'].append(topFileset)
    for task in mergeTasks:
        mergeWorkflow = Workflow(task='/%s/Repack/%s' % (workflowName, task), **options)
        mergeWorkflow.create()
        unmergedFileset = Fileset(name='TestUnmergedFileset%s' % task)
        unmergedFileset.create()
        mergeSub = Subscription(unmergedFileset, mergeWorkflow)
        mergeSub.create()
        # closing all unmerged filesets drives 'Processing Done'
        self.stateMap['Processing Done'].append(unmergedFileset)
    # the cleanup subscription is deliberately NOT tracked in stateMap
    cleanupWorkflow = Workflow(task='/Repack_Run481516_StreamZ/Repack/RepackCleanupUnmergedwrite_QuadElectron_RAW',
                               **options)
    cleanupWorkflow.create()
    unmergedFileset = Fileset(name='TestUnmergedFilesetToCleanup')
    unmergedFileset.create()
    cleanupSub = Subscription(unmergedFileset, cleanupWorkflow)
    cleanupSub.create()
    return
def setupExpressWorkflow(self):
"""
_setupExpressWorkflow_
Populate WMBS with a express-like workflow,
#.........这里部分代码省略.........
示例7: Tier0FeederPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
"""
getNotClosedOutWorkflowsDAO = self.daoFactory(classname = "Tier0Feeder.GetNotClosedOutWorkflows")
workflows = getNotClosedOutWorkflowsDAO.execute()
if len(workflows) == 0:
logging.debug("No workflows to publish to couch monitoring, doing nothing")
if workflows:
for workflow in workflows:
(workflowId, filesetId, filesetOpen, workflowName) = workflow
# find returns -1 if the string is not found
if workflowName.find('PromptReco') >= 0:
logging.debug("Closing out instantaneously PromptReco Workflow %s" % workflowName)
self.updateClosedState(workflowName, workflowId)
else :
# Check if fileset (which you already know) is closed or not
# FIXME: No better way to do it? what comes from the DAO is a string, casting bool or int doesn't help much.
# Works like that :
if filesetOpen == '0':
self.updateClosedState(workflowName, workflowId)
return
def updateClosedState(self, workflowName, workflowId):
    """
    _updateClosedState_
    Mark a workflow as Closed

    Sets the workflow's status to 'Closed' in the local request couch and,
    on success, flags it as closed out in the monitoring tables.

    :param workflowName: name of the workflow to close
    :param workflowId: WMBS id of the workflow, passed to the monitoring DAO
    """
    markCloseoutWorkflowMonitoringDAO = self.daoFactory(classname = "Tier0Feeder.MarkCloseoutWorkflowMonitoring")
    response = self.localRequestCouchDB.updateRequestStatus(workflowName, 'Closed')
    # BUG FIX: the original condition was `response == "OK" or "EXISTS"`,
    # which is always truthy ("EXISTS" is a non-empty string). Use a
    # membership test so the closeout is only recorded when couch actually
    # accepted (or already had) the update.
    if response in ("OK", "EXISTS"):
        logging.debug("Successfully closed workflow %s" % workflowName)
        markCloseoutWorkflowMonitoringDAO.execute(workflowId)
    return
def notifyStorageManager(self):
"""
_notifyStorageManager_
Find all finished streamers for closed all run/stream
Send the notification message to StorageManager
Update the streamer status to finished (deleted = 1)
"""
getFinishedStreamersDAO = self.daoFactory(classname = "SMNotification.GetFinishedStreamers")
markStreamersFinishedDAO = self.daoFactory(classname = "SMNotification.MarkStreamersFinished")
allFinishedStreamers = getFinishedStreamersDAO.execute(transaction = False)
num = len(allFinishedStreamers)/50
for finishedStreamers in [allFinishedStreamers[i::num] for i in range(num)]:
streamers = []
filenameParams = ""
for (id, lfn) in finishedStreamers:
streamers.append(id)
filenameParams += "-FILENAME %s " % os.path.basename(lfn)
logging.debug("Notifying transfer system about processed streamers")
示例8: TaskArchiverPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
def getFinishedWorkflows(self):
    """
    Collect finished workflows in two flavours.

    1. Get finished workflows (a finished workflow is defined in Workflow.GetFinishedWorkflows)
    2. Get finished workflows with logCollect and Cleanup only.
    3. Combine those and return:
       finishedwfs - without LogCollect and CleanUp task
       finishedwfsWithLogCollectAndCleanUp - including LogCollect and CleanUp task
    """
    finishedWorkflowsDAO = self.daoFactory(classname="Workflow.GetFinishedWorkflows")
    finishedwfs = finishedWorkflowsDAO.execute()
    secondaryFinished = finishedWorkflowsDAO.execute(onlySecondary=True)
    # intersect: keep only workflows finished in both views
    finishedwfsWithLogCollectAndCleanUp = {wf: finishedwfs[wf]
                                           for wf in secondaryFinished
                                           if wf in finishedwfs}
    return (finishedwfs, finishedwfsWithLogCollectAndCleanUp)
def killCondorJobsByWFStatus(self, statusList):
    """
    Kill the WMBS workflows whose central-couch status is in *statusList*.

    :param statusList: status string or list of status strings
    :returns: the request names that were targeted
    """
    # accept a single status string as a convenience
    if isinstance(statusList, basestring):
        statusList = [statusList]
    reqNames = self.centralCouchDBWriter.getRequestByStatus(statusList)
    logging.info("There are %d requests in %s status in central couch.", len(reqNames), statusList)
    self.workQueue.killWMBSWorkflows(reqNames)
    return reqNames
def completeTasks(self, finishedwfs):
    """
    _completeTasks_
    This method will call several auxiliary methods to do the following:
    1. Notify the WorkQueue about finished subscriptions
    2. mark workflow as completed in the dbsbuffer_workflow table
    """
    if not finishedwfs:
        return
    logging.info("Found %d candidate workflows for completing:", len(finishedwfs))
    completedWorkflowsDAO = self.dbsDaoFactory(classname="UpdateWorkflowsToCompleted")
    centralCouchAlive = True
    try:
        # kill jobs for aborted/force-completed requests first; if central
        # couch is unreachable, skip the per-workflow work this cycle
        self.killCondorJobsByWFStatus(["force-complete", "aborted"])
    except Exception as ex:
        centralCouchAlive = False
        logging.error("we will try again when remote couch server comes back\n%s", str(ex))
    if centralCouchAlive:
        logging.info("Marking subscriptions as Done ...")
        for workflow in finishedwfs:
            try:
                # Notify the WorkQueue, if there is one
                if self.workQueue is not None:
                    subList = []
                    for l in finishedwfs[workflow]["workflows"].values():
                        subList.extend(l)
                    self.notifyWorkQueue(subList)
                # Tier-0 case, the agent has to mark it completed
                if not self.useReqMgrForCompletionCheck:
                    self.requestLocalCouchDB.updateRequestStatus(workflow, "completed")
                    logging.info("status updated to completed %s", workflow)
                completedWorkflowsDAO.execute([workflow])
            except TaskArchiverPollerException as ex:
                # Something didn't go well when notifying the workqueue, abort!!!
                logging.error("Something bad happened while archiving tasks.")
                logging.error(str(ex))
                continue
            except Exception as ex:
                # Something didn't go well on couch, abort!!!
                msg = "Problem while archiving tasks for workflow %s\n" % workflow
                msg += "Exception message: %s" % str(ex)
                msg += "\nTraceback: %s" % traceback.format_exc()
                logging.error(msg)
                continue
    return
def notifyWorkQueue(self, subList):
    """
    _notifyWorkQueue_
    Tells the workQueue component that a particular subscription,
    or set of subscriptions, is done. Receives confirmation

    :param subList: list of subscription ids to mark done
    :raises TaskArchiverPollerException: when workqueue fails for any
        reason other than not knowing the subscription
    """
    for sub in subList:
        try:
            self.workQueue.doneWork(SubscriptionId=sub)
        except WorkQueueNoMatchingElements:
            # Subscription wasn't known to WorkQueue, feel free to clean up
            logging.debug("Local WorkQueue knows nothing about this subscription: %s", sub)
        except Exception as ex:
            # any other workqueue failure aborts the whole notification
            msg = "Error talking to workqueue: %s\n" % str(ex)
            msg += "Tried to complete the following: %s\n" % sub
            raise TaskArchiverPollerException(msg)
    return
示例9: TaskArchiverPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
# update the completed flag in dbsbuffer_workflow table so blocks can be closed
# create updateDBSBufferWorkflowComplete DAO
if len(finishedwfs) == 0:
return
completedWorkflowsDAO = self.dbsDaoFactory(classname = "UpdateWorkflowsToCompleted")
centralCouchAlive = True
try:
#TODO: need to enable when reqmgr2 -wmstats is ready
#abortedWorkflows = self.reqmgrCouchDBWriter.getRequestByStatus(["aborted"], format = "dict");
abortedWorkflows = self.centralCouchDBWriter.getRequestByStatus(["aborted"])
logging.info("There are %d requests in 'aborted' status in central couch." % len(abortedWorkflows))
forceCompleteWorkflows = self.centralCouchDBWriter.getRequestByStatus(["force-complete"])
logging.info("List of 'force-complete' workflows in central couch: %s" % forceCompleteWorkflows)
except Exception as ex:
centralCouchAlive = False
logging.error("we will try again when remote couch server comes back\n%s" % str(ex))
if centralCouchAlive:
for workflow in finishedwfs:
try:
#Notify the WorkQueue, if there is one
if self.workQueue != None:
subList = []
logging.info("Marking subscriptions as Done ...")
for l in finishedwfs[workflow]["workflows"].values():
subList.extend(l)
self.notifyWorkQueue(subList)
#Now we know the workflow as a whole is gone, we can delete the information from couch
if not self.useReqMgrForCompletionCheck:
self.requestLocalCouchDB.updateRequestStatus(workflow, "completed")
logging.info("status updated to completed %s" % workflow)
if workflow in abortedWorkflows:
#TODO: remove when reqmgr2-wmstats deployed
newState = "aborted-completed"
elif workflow in forceCompleteWorkflows:
newState = "completed"
else:
newState = None
if newState != None:
# update reqmgr workload document only request mgr is installed
if not self.useReqMgrForCompletionCheck:
# commented out untill all the agent is updated so every request have new state
# TODO: agent should be able to write reqmgr db diretly add the right group in
# reqmgr
self.requestLocalCouchDB.updateRequestStatus(workflow, newState)
else:
try:
#TODO: try reqmgr1 call if it fails (reqmgr2Only - remove this line when reqmgr is replaced)
logging.info("Updating status to '%s' in both oracle and couchdb ..." % newState)
self.reqmgrSvc.updateRequestStatus(workflow, newState)
#And replace with this - remove all the excption
#self.reqmgr2Svc.updateRequestStatus(workflow, newState)
except httplib.HTTPException as ex:
# If we get an HTTPException of 404 means reqmgr2 request
if ex.status == 404:
# try reqmgr2 call
msg = "%s : reqmgr2 request: %s" % (workflow, str(ex))
logging.warning(msg)
self.reqmgr2Svc.updateRequestStatus(workflow, newState)
else:
示例10: Request
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
(workload, request_args) = self.initialize_clone(request_args["OriginalRequestName"])
return self.post(workload, request_args)
dn = cherrypy.request.user.get("dn", "unknown")
if ('SoftTimeout' in request_args) and ('GracePeriod' in request_args):
request_args['HardTimeout'] = request_args['SoftTimeout'] + request_args['GracePeriod']
if 'RequestPriority' in request_args:
self.gq_service.updatePriority(workload.name(), request_args['RequestPriority'])
if "total_jobs" in request_args:
# only GQ update this stats
# request_args should contain only 4 keys 'total_jobs', 'input_lumis', 'input_events', 'input_num_files'}
report = self.reqmgr_db_service.updateRequestStats(workload.name(), request_args)
# if is not just updating status
else:
req_status = request_args.get("RequestStatus", None)
if len(request_args) >= 1 and req_status == None:
try:
workload.updateArguments(request_args)
except Exception as ex:
msg = traceback.format_exc()
cherrypy.log("Error for request args %s: %s" % (request_args, msg))
raise InvalidSpecParameterValue(str(ex))
# trailing / is needed for the savecouchUrl function
workload.saveCouch(self.config.couch_host, self.config.couch_reqmgr_db)
elif (req_status in ["closed-out" "announced"]) and request_args.get("cascade", False):
cascade_list = self._retrieveResubmissionChildren(workload.name)
for req_name in cascade_list:
report = self.reqmgr_db_service.updateRequestStatus(req_name, req_status)
# If it is aborted or force-complete transition call workqueue to cancel the request
else:
if req_status == "aborted" or req_status == "force-complete":
self.gq_service.cancelWorkflow(workload.name())
report = self.reqmgr_db_service.updateRequestProperty(workload.name(), request_args, dn)
if report == 'OK':
return {workload.name(): "OK"}
else:
return {workload.name(): "ERROR"}
@restcall(formats=[('application/json', JSONFormat())])
def put(self, workload_pair_list):
    """
    Update a set of requests.

    :param workload_pair_list: list of (workload, request_args) tuples
    :returns: list containing one update report per request
    """
    report = []
    for workload, request_args in workload_pair_list:
        result = self._updateRequest(workload, request_args)
        report.append(result)
    return report
@restcall(formats=[('application/json', JSONFormat())])
def delete(self, request_name):
    """
    Delete the couch document for *request_name*.

    :param request_name: name of the request document to remove
    :raises cherrypy.HTTPError: 404 when the couch delete fails
    """
    cherrypy.log("INFO: Deleting request document '%s' ..." % request_name)
    try:
        self.reqmgr_db.delete_doc(request_name)
    except CouchError as ex:
        msg = "ERROR: Delete failed."
        cherrypy.log(msg + " Reason: %s" % ex)
        raise cherrypy.HTTPError(404, msg)
    # TODO
    # delete should also happen on WMStats
# TODO
# delete should also happen on WMStats
示例11: CleanCouchPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
logging.info("Getting requests in '%s' state for team '%s'", self.deletableState,
self.teamName)
endTime = int(time.time()) - self.archiveDelayHours * 3600
wfs = self.centralRequestDBReader.getRequestByTeamAndStatus(self.teamName,
self.deletableState)
commonWfs = self.centralRequestDBReader.getRequestByStatusAndStartTime(self.deletableState,
False, endTime)
deletableWorkflows = list(set(wfs) & set(commonWfs))
logging.info("Ready to archive normal %s workflows", len(deletableWorkflows))
numUpdated = self.archiveWorkflows(deletableWorkflows, "normal-archived")
logging.info("archive normal %s workflows", numUpdated)
abortedWorkflows = self.centralRequestDBReader.getRequestByStatus(["aborted-completed"])
logging.info("Ready to archive aborted %s workflows", len(abortedWorkflows))
numUpdated = self.archiveWorkflows(abortedWorkflows, "aborted-archived")
logging.info("archive aborted %s workflows", numUpdated)
rejectedWorkflows = self.centralRequestDBReader.getRequestByStatus(["rejected"])
logging.info("Ready to archive rejected %s workflows", len(rejectedWorkflows))
numUpdated = self.archiveWorkflows(rejectedWorkflows, "rejected-archived")
logging.info("archive rejected %s workflows", numUpdated)
except Exception as ex:
logging.error(str(ex))
logging.error("Error occurred, will try again next cycle")
def archiveWorkflows(self, workflows, archiveState):
    """
    Archive workflows once their local couch documents are cleaned.

    :param workflows: iterable of workflow names to archive
    :param archiveState: final state to set (e.g. "normal-archived")
    :returns: number of workflows archived through ReqMgr (the Tier-0
        path does not contribute to this count)
    """
    updated = 0
    for workflowName in workflows:
        # only archive after the local couch cleanup succeeded
        if self.cleanAllLocalCouchDB(workflowName):
            if self.useReqMgrForCompletionCheck:
                try:
                    #TODO: try reqmgr1 call if it fails (reqmgr2Only - remove this line when reqmgr is replaced)
                    self.reqmgrSvc.updateRequestStatus(workflowName, archiveState)
                    #And replace with this - remove all the excption
                    #self.reqmgr2Svc.updateRequestStatus(workflowName, archiveState)
                except HTTPException as ex:
                    # If we get an HTTPException of 404 means reqmgr2 request
                    if ex.status == 404:
                        # try reqmgr2 call
                        msg = "%s : reqmgr2 request: %s" % (workflowName, str(ex))
                        logging.warning(msg)
                        self.reqmgr2Svc.updateRequestStatus(workflowName, archiveState)
                    else:
                        msg = "%s : fail to update status with HTTP error: %s" % (workflowName, str(ex))
                        logging.error(msg)
                        raise ex
                updated += 1
                logging.debug("status updated to %s %s", archiveState, workflowName)
            else:
                # tier0 update case
                self.centralRequestDBWriter.updateRequestStatus(workflowName, archiveState)
    return updated
def deleteWorkflowFromJobCouch(self, workflowName, db):
"""
_deleteWorkflowFromCouch_
If we are asked to delete the workflow from couch, delete it
to clear up some space.
Load the document IDs and revisions out of couch by workflowName,
then order a delete on them.
"""
options = {"startkey": [workflowName], "endkey": [workflowName, {}], "reduce": False}
示例12: Tier0FeederPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
#.........这里部分代码省略.........
"""
getNotClosedOutWorkflowsDAO = self.daoFactory(classname = "Tier0Feeder.GetNotClosedOutWorkflows")
workflows = getNotClosedOutWorkflowsDAO.execute()
if len(workflows) == 0:
logging.debug("No workflows to publish to couch monitoring, doing nothing")
if workflows:
for workflow in workflows:
(workflowId, filesetId, filesetOpen, workflowName) = workflow
# find returns -1 if the string is not found
if workflowName.find('PromptReco') >= 0:
logging.debug("Closing out instantaneously PromptReco Workflow %s" % workflowName)
self.updateClosedState(workflowName, workflowId)
else :
# Check if fileset (which you already know) is closed or not
# FIXME: No better way to do it? what comes from the DAO is a string, casting bool or int doesn't help much.
# Works like that :
if filesetOpen == '0':
self.updateClosedState(workflowName, workflowId)
return
def updateClosedState(self, workflowName, workflowId):
    """
    _updateClosedState_
    Mark a workflow as Closed

    Sets the workflow's status to 'Closed' in the local request couch and,
    on success, flags it as closed out in the monitoring tables.

    :param workflowName: name of the workflow to close
    :param workflowId: WMBS id of the workflow, passed to the monitoring DAO
    """
    markCloseoutWorkflowMonitoringDAO = self.daoFactory(classname = "Tier0Feeder.MarkCloseoutWorkflowMonitoring")
    response = self.localRequestCouchDB.updateRequestStatus(workflowName, 'Closed')
    # BUG FIX: the original condition was `response == "OK" or "EXISTS"`,
    # which is always truthy ("EXISTS" is a non-empty string). Use a
    # membership test so the closeout is only recorded when couch actually
    # accepted (or already had) the update.
    if response in ("OK", "EXISTS"):
        logging.debug("Successfully closed workflow %s" % workflowName)
        markCloseoutWorkflowMonitoringDAO.execute(workflowId)
    return
def getPromptRecoStatusT0DataSvc(self):
    """
    _getPromptRecoStatusDataSvc_
    Check the PromptRecoStatus (enabled/disabled) set by the ORM

    :returns: status value reported by the T0 Data Service DAO
    """
    statusDAO = self.daoFactoryT0DataSvc(classname="T0DataSvc.GetPromptRecoStatus")
    return statusDAO.execute(transaction=False)
def updateRunConfigT0DataSvc(self):
"""
_updateRunConfigT0DataSvc_
Check for new runs and push their info into the Tier0 Data Service.
"""
getNewRunDAO = self.daoFactory(classname = "T0DataSvc.GetNewRun")
newRun = getNewRunDAO.execute(transaction = False)
if len(newRun) > 0:
binds = []
for runInfo in newRun:
示例13: CleanCouchPoller
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
class CleanCouchPoller(BaseWorkerThread):
"""
Cleans up local couch db according the the given condition.
1. Cleans local couch db when request is completed and reported to cental db.
This will clean up local couchdb, local summary db, local queue
2. Cleans old couchdoc which is created older than the time threshold
"""
def __init__(self, config):
    """
    Run the worker-thread base initialisation and keep a handle on the
    component configuration for later use in setup()/algorithm().
    """
    BaseWorkerThread.__init__(self)
    # Everything else (couch connections, service clients) is wired up
    # lazily in setup(), so only the config is stored here.
    self.config = config
def setup(self, parameters):
    """
    Called once at component startup.

    Wires up every connection this poller needs: the local wmstats couch,
    a reader/writer pair on the request couch DB (central for normal
    agents, local for Tier0), the ReqMgr/ReqMgr2 service client where
    applicable, and the JobStateMachine couch databases.
    """
    taskArchiver = self.config.TaskArchiver
    analytics = self.config.AnalyticsDataCollector

    self.useReqMgrForCompletionCheck = getattr(taskArchiver, 'useReqMgrForCompletionCheck', True)
    self.archiveDelayHours = getattr(taskArchiver, 'archiveDelayHours', 0)

    self.wmstatsCouchDB = WMStatsWriter(taskArchiver.localWMStatsURL,
                                        "WMStatsAgent")

    #TODO: we might need to use local db for Tier0
    self.centralRequestDBReader = RequestDBReader(analytics.centralRequestDBURL,
                                                  couchapp = analytics.RequestCouchApp)

    if not self.useReqMgrForCompletionCheck:
        # Tier0 case: no ReqMgr in play, workflows become deletable once
        # "completed", and status updates go to the local request DB.
        self.deletableState = "completed"
        self.centralRequestDBWriter = RequestDBWriter(analytics.localT0RequestDBURL,
                                                      couchapp = analytics.RequestCouchApp)
    else:
        self.deletableState = "announced"
        self.centralRequestDBWriter = RequestDBWriter(analytics.centralRequestDBURL,
                                                      couchapp = analytics.RequestCouchApp)
        if taskArchiver.reqmgr2Only:
            self.reqmgr2Svc = ReqMgr(taskArchiver.ReqMgr2ServiceURL)
        else:
            #TODO: remove this for reqmgr2
            self.reqmgrSvc = RequestManager({'endpoint': taskArchiver.ReqMgrServiceURL})

    jobStateMachine = self.config.JobStateMachine
    jobDBName = jobStateMachine.couchDBName
    self.jobCouchdb = CouchServer(sanitizeURL(jobStateMachine.couchurl)['url'])
    self.jobsdatabase = self.jobCouchdb.connectDatabase("%s/jobs" % jobDBName)
    self.fwjrdatabase = self.jobCouchdb.connectDatabase("%s/fwjrs" % jobDBName)
    self.statsumdatabase = self.jobCouchdb.connectDatabase(jobStateMachine.summaryStatsDBName)
def algorithm(self, parameters):
    """
    One polling cycle.

    First purges wmstats documents older than the configured retention,
    then makes three archiving passes against the request DB reader:

      1. workflows in ``self.deletableState`` that ended more than
         ``archiveDelayHours`` ago -> "normal-archived"
      2. "aborted-completed" workflows -> "aborted-archived"
      3. "rejected" workflows -> "rejected-archived"

    Any exception is logged and swallowed so the next cycle can retry.
    """
    try:
        logging.info("Cleaning up the old request docs")
        report = self.wmstatsCouchDB.deleteOldDocs(self.config.TaskArchiver.DataKeepDays)
        logging.info("%s docs deleted" % report)

        logging.info("getting complete and announced requests")

        # Only archive workflows that reached the deletable state more
        # than archiveDelayHours ago.
        endTime = int(time.time()) - self.archiveDelayHours * 3600
        deletableWorkflows = self.centralRequestDBReader.getRequestByStatusAndStartTime(self.deletableState,
                                                                                        False, endTime)
        logging.info("Ready to archive normal %s workflows" % len(deletableWorkflows))
        numUpdated = self.archiveWorkflows(deletableWorkflows, "normal-archived")
        logging.info("archive normal %s workflows" % numUpdated)

        abortedWorkflows = self.centralRequestDBReader.getRequestByStatus(["aborted-completed"])
        logging.info("Ready to archive aborted %s workflows" % len(abortedWorkflows))
        numUpdated = self.archiveWorkflows(abortedWorkflows, "aborted-archived")
        logging.info("archive aborted %s workflows" % numUpdated)

        rejectedWorkflows = self.centralRequestDBReader.getRequestByStatus(["rejected"])
        logging.info("Ready to archive rejected %s workflows" % len(rejectedWorkflows))
        numUpdated = self.archiveWorkflows(rejectedWorkflows, "rejected-archived")
        logging.info("archive rejected %s workflows" % numUpdated)
    except Exception:
        # Fix: logging.exception records the full traceback; the original
        # logging.error(str(ex)) discarded it. The exception is still
        # swallowed deliberately so the poller retries next cycle.
        logging.exception("Error occurred, will try again next cycle")
    return
def archiveWorkflows(self, workflows, archiveState):
updated = 0
for workflowName in workflows:
if self.cleanAllLocalCouchDB(workflowName):
if self.useReqMgrForCompletionCheck:
if self.config.TaskArchiver.reqmgr2Only:
self.reqmgr2Svc.updateRequestStatus(workflowName, archiveState)
else:
self.reqmgrSvc.updateRequestStatus(workflowName, archiveState);
updated += 1
logging.debug("status updated to %s %s" % (archiveState, workflowName))
else:
#.........这里部分代码省略.........
示例14: RequestDBTest
# 需要导入模块: from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter [as 别名]
# 或者: from WMCore.Services.RequestDB.RequestDBWriter.RequestDBWriter import updateRequestStatus [as 别名]
class RequestDBTest(unittest.TestCase):
"""
"""
def setUp(self):
    """
    _setUp_

    Spin up a throw-away couch instance carrying the ReqMgr couchapp and
    point a RequestDBWriter/RequestDBReader pair at it.
    """
    self.schema = []
    self.couchApps = ["ReqMgr"]

    testInit = TestInitCouchApp('RequestDBServiceTest')
    testInit.setLogging()
    testInit.setDatabaseConnection()
    testInit.setSchema(customModules = self.schema,
                       useDefault = False)

    # NOTE(review): "requsetdb_t" looks like a typo for "requestdb_t",
    # but it is only a local test db name and is used consistently here.
    dbName = 'requsetdb_t'
    testInit.setupCouch(dbName, *self.couchApps)
    self.testInit = testInit

    reqDBURL = "%s/%s" % (testInit.couchUrl, dbName)
    self.requestWriter = RequestDBWriter(reqDBURL)
    self.requestReader = RequestDBReader(reqDBURL)
    # Disable stale-view options so reads observe writes immediately.
    self.requestWriter.defaultStale = {}
    self.requestReader.defaultStale = {}
    return
def tearDown(self):
    """
    _tearDown_

    Remove the couch databases created in setUp.
    """
    self.testInit.tearDownCouch()
def testRequestDBWriter(self):
    # Round-trip test of the writer/reader pair against the couchapp:
    # insert, status update, property update, then the read views.
    schema = generate_reqmgr_schema(3)

    # Insert one request; a status update on it should report 'OK',
    # while updating a non-existent document returns an error string
    # (no exception is raised).
    result = self.requestWriter.insertGenericRequest(schema[0])
    self.assertEqual(len(result), 1, 'insert fail');
    self.assertEqual(self.requestWriter.updateRequestStatus(schema[0]['RequestName'], "failed"), 'OK', 'update fail')
    self.assertEqual(self.requestWriter.updateRequestStatus("not_exist_schema", "assigned"),
                     'Error: document not found')

    # Same contract for property updates: 'OK' on an existing request,
    # error string on a missing one.
    result = self.requestWriter.updateRequestProperty(schema[0]['RequestName'],
                                                      {'Teams': ['teamA']})
    self.assertEqual(self.requestWriter.updateRequestProperty(schema[0]['RequestName'],
                                                              {'Teams': ['teamA']}), 'OK', 'update fail')
    self.assertEqual(self.requestWriter.updateRequestProperty("not_exist_schema", {'Teams': 'teamA'}),
                     'Error: document not found')

    # Read views: lookup by name, by status, and the combined
    # status/type view all see the single updated request.
    result = self.requestReader.getRequestByNames([schema[0]['RequestName']])
    self.assertEqual(len(result), 1, "should be 1")
    result = self.requestReader.getRequestByStatus(["failed"], False, 1)
    self.assertEqual(len(result), 1, "should be 1")
    result = self.requestReader.getStatusAndTypeByRequest([schema[0]['RequestName']])
    self.assertEqual(result[schema[0]['RequestName']][0], 'failed', "should be failed")

    # Insert two more requests ~2s apart so the time-bounded view can
    # distinguish them: an endTime before the second insert sees only
    # one 'new' request, an endTime after it sees both.
    result = self.requestWriter.insertGenericRequest(schema[1])
    time.sleep(2)
    result = self.requestWriter.insertGenericRequest(schema[2])
    endTime = int(time.time()) - 1
    result = self.requestReader.getRequestByStatusAndEndTime("new", False, endTime)
    self.assertEqual(len(result), 1, "should be 1")
    endTime = int(time.time()) + 1
    result = self.requestReader.getRequestByStatusAndEndTime("new", False, endTime)
    self.assertEqual(len(result), 2, "should be 2")