本文整理汇总了Python中WMComponent.TaskArchiver.TaskArchiverPoller.TaskArchiverPoller类的典型用法代码示例。如果您正苦于以下问题:Python TaskArchiverPoller类的具体用法?Python TaskArchiverPoller怎么用?Python TaskArchiverPoller使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了TaskArchiverPoller类的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: atestTaskArchiverPollerAlertsSending_killSubscriptions
def atestTaskArchiverPollerAlertsSending_killSubscriptions(self):
    """
    Cause an exception (alert-worthy situation) in the
    TaskArchiverPoller killSubscriptions method.
    (Only 1 situation out of two tested.)

    NOTE: this test is disabled — the 'atest' prefix hides it from the
    unittest runner and the early ``return`` below short-circuits the body.
    The dead code is kept for when the test is re-enabled.
    """
    return
    myThread = threading.currentThread()
    config = self.getConfig()
    testTaskArchiver = TaskArchiverPoller(config=config)
    # shall later be called directly from utils module
    handler, self.alertsReceiver = \
        utils.setUpReceiver(config.Alert.address, config.Alert.controlAddr)
    # items will fail on calling .load() - regardless, the same except block
    numAlerts = 3
    doneList = [{'id': x} for x in range(numAlerts)]
    # final re-raise is currently commented, so don't expect Exception here
    testTaskArchiver.killSubscriptions(doneList)
    # wait (poll) for the generated alerts to arrive
    while len(handler.queue) < numAlerts:
        time.sleep(0.3)
        # print() call (py2/py3 compatible) instead of py2 print statement
        print("%s waiting for alert to arrive ..." % inspect.stack()[0][3])
    self.alertsReceiver.shutdown()
    self.alertsReceiver = None
    # now check if the alert was properly sent
    self.assertEqual(len(handler.queue), numAlerts)
    alert = handler.queue[0]
    self.assertEqual(alert["Source"], "TaskArchiverPoller")
    return
示例2: testB_testErrors
def testB_testErrors(self):
    """
    _testErrors_

    Run the TaskArchiverPoller over a job group containing a failed FWJR
    and verify that the workload summary document records the failure:
    failure time, error code '99999', de-duplicated lumi info, and the
    per-site failure/error histograms.
    """
    myThread = threading.currentThread()
    config = self.getConfig()
    workloadPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
    workload = self.createWorkload(workloadName=workloadPath)
    testJobGroup = self.createTestJobGroup(config=config,
                                           name=workload.name(),
                                           specLocation=workloadPath,
                                           error=True)

    cachePath = os.path.join(config.JobCreator.jobCacheDir,
                             "TestWorkload", "ReReco")
    os.makedirs(cachePath)
    self.assertTrue(os.path.exists(cachePath))

    couchdb = CouchServer(config.JobStateMachine.couchurl)
    jobdb = couchdb.connectDatabase("%s/jobs" % self.databaseName)
    fwjrdb = couchdb.connectDatabase("%s/fwjrs" % self.databaseName)
    # prime the JobDump/FWJRDump views so the poller sees the documents
    jobdb.loadView("JobDump", "jobsByWorkflowName",
                   options={"startkey": [workload.name()],
                            "endkey": [workload.name(), {}]})['rows']
    fwjrdb.loadView("FWJRDump", "fwjrsByWorkflowName",
                    options={"startkey": [workload.name()],
                             "endkey": [workload.name(), {}]})['rows']

    testTaskArchiver = TaskArchiverPoller(config=config)
    testTaskArchiver.algorithm()

    dbname = getattr(config.JobStateMachine, "couchDBName")
    workdatabase = couchdb.connectDatabase("%s/workloadsummary" % dbname)
    workloadSummary = workdatabase.document(id=workload.name())

    self.assertEqual(workloadSummary['errors']['/TestWorkload/ReReco']['failureTime'], 500)
    # 'in' instead of dict.has_key(), which was removed in Python 3
    self.assertTrue('99999' in workloadSummary['errors']['/TestWorkload/ReReco']['cmsRun1'])
    failedRunInfo = workloadSummary['errors']['/TestWorkload/ReReco']['cmsRun1']['99999']['runs']
    # de-duplicate lumis per run before comparing
    for key, value in failedRunInfo.items():
        failedRunInfo[key] = list(set(value))
    # assertEqual instead of the deprecated assertEquals alias
    self.assertEqual(failedRunInfo, {'10': [12312]},
                     "Wrong lumi information in the summary for failed jobs")

    # Check the failures-by-site histograms (hoist the deep lookups once)
    workflowFailures = workloadSummary['histograms']['workflowLevel']['failuresBySite']
    stepErrors = workloadSummary['histograms']['stepLevel']['/TestWorkload/ReReco']['cmsRun1']['errorsBySite']
    self.assertEqual(workflowFailures['data']['T1_IT_CNAF']['Failed Jobs'], 10)
    self.assertEqual(stepErrors['data']['T1_IT_CNAF']['99999'], 10)
    self.assertEqual(stepErrors['data']['T1_IT_CNAF']['8020'], 10)
    self.assertEqual(workflowFailures['average']['Failed Jobs'], 10)
    self.assertEqual(stepErrors['average']['99999'], 10)
    self.assertEqual(stepErrors['average']['8020'], 10)
    self.assertEqual(workflowFailures['stdDev']['Failed Jobs'], 0)
    self.assertEqual(stepErrors['stdDev']['99999'], 0)
    self.assertEqual(stepErrors['stdDev']['8020'], 0)
    return
示例3: atestTaskArchiverPollerAlertsSending_notifyWorkQueue
def atestTaskArchiverPollerAlertsSending_notifyWorkQueue(self):
    """
    Cause an exception (alert-worthy situation) in the
    TaskArchiverPoller notifyWorkQueue method.

    NOTE: this test is disabled — the 'atest' prefix hides it from the
    unittest runner and the early ``return`` below short-circuits the body.
    The dead code is kept for when the test is re-enabled.
    """
    return
    myThread = threading.currentThread()
    config = self.getConfig()
    testTaskArchiver = TaskArchiverPoller(config=config)
    # shall later be called directly from utils module
    handler, self.alertsReceiver = \
        utils.setUpReceiver(config.Alert.address, config.Alert.controlAddr)
    # prepare such input which will go until where it expectantly fails
    # and shall send an alert; this will currently fail in the
    # TaskArchiverPoller killSubscriptions on trying to access the
    # .load() method which the items below don't have. Should anything
    # change in the TaskArchiverPoller without modifying this test
    # accordingly, it may be failing ...
    # print() call (py2/py3 compatible) instead of py2 print statement
    print("failures 'AttributeError: 'dict' object has no attribute 'load' expected ...")
    subList = [{'id': 1}, {'id': 2}, {'id': 3}]
    testTaskArchiver.notifyWorkQueue(subList)
    # wait (poll) for the generated alerts to arrive, one per failed item
    while len(handler.queue) < len(subList):
        time.sleep(0.3)
        print("%s waiting for alert to arrive ..." % inspect.stack()[0][3])
    self.alertsReceiver.shutdown()
    self.alertsReceiver = None
    # now check if the alerts were properly sent (expect this many failures)
    self.assertEqual(len(handler.queue), len(subList))
    alert = handler.queue[0]
    self.assertEqual(alert["Source"], "TaskArchiverPoller")
示例4: atestD_Timing
def atestD_Timing(self):
    """
    _Timing_

    Time how long the TaskArchiverPoller takes to archive a large job set
    and verify everything was cleaned out of WMBS afterwards.

    NOTE: this test is disabled — the 'atest' prefix hides it from the
    unittest runner and the early ``return`` below short-circuits the body.
    The dead code is kept for when the test is re-enabled.
    """
    return
    myThread = threading.currentThread()
    name = makeUUID()
    config = self.getConfig()
    jobList = self.createGiantJobSet(name=name, config=config,
                                     nSubs=10, nJobs=1000, nFiles=10)
    testTaskArchiver = TaskArchiverPoller(config=config)

    startTime = time.time()
    testTaskArchiver.algorithm()
    stopTime = time.time()

    # everything should have been archived away from WMBS
    result = myThread.dbi.processData("SELECT * FROM wmbs_job")[0].fetchall()
    self.assertEqual(len(result), 0)
    result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
    self.assertEqual(len(result), 0)
    result = myThread.dbi.processData("SELECT * FROM wmbs_jobgroup")[0].fetchall()
    self.assertEqual(len(result), 0)
    result = myThread.dbi.processData("SELECT * FROM wmbs_file_details")[0].fetchall()
    self.assertEqual(len(result), 0)
    testWMBSFileset = Fileset(id=1)
    self.assertEqual(testWMBSFileset.exists(), False)

    # lazy %-args: logging formats only if the record is actually emitted
    logging.info("TaskArchiver took %f seconds", stopTime - startTime)
示例5: testE_multicore
def testE_multicore(self):
    """
    _multicore_

    Create a workload summary based on a multicore job report and verify
    that WMBS is emptied and the merge/processing performance averages
    land in the summary document.
    """
    myThread = threading.currentThread()
    config = self.getConfig()
    workloadPath = os.path.join(self.testDir, "specDir", "spec.pkl")
    workload = self.createWorkload(workloadName=workloadPath)
    testJobGroup = self.createTestJobGroup(
        config=config, name=workload.name(), specLocation=workloadPath,
        error=False, multicore=True
    )

    cachePath = os.path.join(config.JobCreator.jobCacheDir, "TestWorkload", "ReReco")
    os.makedirs(cachePath)
    self.assertTrue(os.path.exists(cachePath))

    dbname = config.TaskArchiver.workloadSummaryCouchDBName
    couchdb = CouchServer(config.JobStateMachine.couchurl)
    workdatabase = couchdb.connectDatabase(dbname)

    testTaskArchiver = TaskArchiverPoller(config=config)
    testTaskArchiver.algorithm()

    # WMBS should be completely cleaned out by the archiver
    result = myThread.dbi.processData("SELECT * FROM wmbs_job")[0].fetchall()
    self.assertEqual(len(result), 0, "No job should have survived")
    result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
    self.assertEqual(len(result), 0)
    result = myThread.dbi.processData("SELECT * FROM wmbs_jobgroup")[0].fetchall()
    self.assertEqual(len(result), 0)
    result = myThread.dbi.processData("SELECT * FROM wmbs_file_details")[0].fetchall()
    self.assertEqual(len(result), 0)

    workloadSummary = workdatabase.document(id="TestWorkload")
    perf = workloadSummary["performance"]["/TestWorkload/ReReco"]["cmsRun1"]
    # assertAlmostEqual instead of the deprecated assertAlmostEquals alias
    self.assertAlmostEqual(perf["minMergeTime"]["average"],
                           5.7624950408900002, places=2)
    self.assertAlmostEqual(perf["numberOfMerges"]["average"],
                           3.0, places=2)
    self.assertAlmostEqual(perf["averageProcessTime"]["average"],
                           29.369966666700002, places=2)
    return
示例6: testB_testErrors
def testB_testErrors(self):
    """
    _testErrors_

    Run the TaskArchiverPoller over a job group containing a failed FWJR
    and verify the workload summary records the failure time, error code
    '99999' and the lumi information.
    """
    myThread = threading.currentThread()
    config = self.getConfig()
    workloadPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
    workload = self.createWorkload(workloadName=workloadPath)
    testJobGroup = self.createTestJobGroup(config=config,
                                           name=workload.name(),
                                           specLocation=workloadPath,
                                           error=True)

    cachePath = os.path.join(config.JobCreator.jobCacheDir,
                             "TestWorkload", "ReReco")
    os.makedirs(cachePath)
    self.assertTrue(os.path.exists(cachePath))

    testTaskArchiver = TaskArchiverPoller(config=config)
    testTaskArchiver.algorithm()

    dbname = getattr(config.JobStateMachine, "couchDBName")
    couchdb = CouchServer(config.JobStateMachine.couchurl)
    workdatabase = couchdb.connectDatabase("%s/workloadsummary" % dbname)
    workloadSummary = workdatabase.document(id=workload.name())

    self.assertEqual(workloadSummary['errors']['/TestWorkload/ReReco']['failureTime'], 500)
    # 'in' instead of dict.has_key(), which was removed in Python 3
    self.assertTrue('99999' in workloadSummary['errors']['/TestWorkload/ReReco']['cmsRun1'])
    # assertEqual instead of the deprecated assertEquals alias
    self.assertEqual(workloadSummary['errors']['/TestWorkload/ReReco']['cmsRun1']['99999']['runs'],
                     {'10': [12312]},
                     "Wrong lumi information in the summary for failed jobs")
    return
示例7: testA_StraightThrough
#.........这里部分代码省略.........
#################################################################
# Now the JobAccountant
# First you need to load all jobs
self.getFWJRAction = self.daoFactory(classname = "Jobs.GetFWJRByState")
completeJobs = self.getFWJRAction.execute(state = "complete")
# Create reports for all jobs
self.createReports(jobs = completeJobs, retryCount = 0)
config.Agent.componentName = 'JobAccountant'
testJobAccountant = JobAccountantPoller(config = config)
testJobAccountant.setup()
# It should do something with the jobs
testJobAccountant.algorithm()
# All the jobs should be done now
result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state = 'Success', jobType = "Processing")
self.assertEqual(len(result), nSubs * nFiles)
#######################################################################
# Now the JobArchiver
config.Agent.componentName = 'JobArchiver'
testJobArchiver = JobArchiverPoller(config = config)
testJobArchiver.algorithm()
# All the jobs should be cleaned up
result = getJobsAction.execute(state = 'Success', jobType = "Processing")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
self.assertEqual(len(result), nSubs * nFiles)
logDir = os.path.join(self.testDir, 'logs')
for job in completeJobs:
self.assertFalse(os.path.exists(job['fwjr_path']))
jobFolder = 'JobCluster_%i' \
% (int(job['id']/config.JobArchiver.numberOfJobsToCluster))
jobPath = os.path.join(logDir, jobFolder, 'Job_%i.tar' %(job['id']))
self.assertTrue(os.path.isfile(jobPath))
self.assertTrue(os.path.getsize(jobPath) > 0)
###########################################################################
# Now the TaskAchiver
config.Agent.componentName = 'TaskArchiver'
testTaskArchiver = TaskArchiverPoller(config = config)
testTaskArchiver.algorithm()
result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
self.assertEqual(len(result), 0)
for jdict in completeJobs:
job = Job(id = jdict['id'])
self.assertFalse(job.exists())
if os.path.isdir('testDir'):
shutil.rmtree('testDir')
shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'testDir'))
return
示例8: testA_BasicFunctionTest
def testA_BasicFunctionTest(self):
"""
_BasicFunctionTest_
Tests the components, by seeing if they can process a simple set of closeouts
"""
myThread = threading.currentThread()
config = self.getConfig()
workloadPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
workload = self.createWorkload(workloadName = workloadPath)
testJobGroup = self.createTestJobGroup(config = config,
name = workload.name(),
specLocation = workloadPath,
error = False)
# Create second workload
testJobGroup2 = self.createTestJobGroup(config = config,
name = workload.name(),
filesetName = "TestFileset_2",
specLocation = workloadPath,
task = "/TestWorkload/ReReco/LogCollect",
type = "LogCollect")
cachePath = os.path.join(config.JobCreator.jobCacheDir,
"TestWorkload", "ReReco")
os.makedirs(cachePath)
self.assertTrue(os.path.exists(cachePath))
cachePath2 = os.path.join(config.JobCreator.jobCacheDir,
"TestWorkload", "LogCollect")
os.makedirs(cachePath2)
self.assertTrue(os.path.exists(cachePath2))
result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
self.assertEqual(len(result), 2)
workflowName = "TestWorkload"
dbname = config.TaskArchiver.workloadSummaryCouchDBName
couchdb = CouchServer(config.JobStateMachine.couchurl)
workdatabase = couchdb.connectDatabase(dbname)
jobdb = couchdb.connectDatabase("%s/jobs" % self.databaseName)
fwjrdb = couchdb.connectDatabase("%s/fwjrs" % self.databaseName)
jobs = jobdb.loadView("JobDump", "jobsByWorkflowName",
options = {"startkey": [workflowName],
"endkey": [workflowName, {}]})['rows']
fwjrdb.loadView("FWJRDump", "fwjrsByWorkflowName",
options = {"startkey": [workflowName],
"endkey": [workflowName, {}]})['rows']
self.assertEqual(len(jobs), 2*self.nJobs)
from WMCore.WMBS.CreateWMBSBase import CreateWMBSBase
create = CreateWMBSBase()
tables = []
for x in create.requiredTables:
tables.append(x[2:])
self.populateWorkflowWithCompleteStatus()
testTaskArchiver = TaskArchiverPoller(config = config)
testTaskArchiver.algorithm()
cleanCouch = CleanCouchPoller(config = config)
cleanCouch.setup()
cleanCouch.algorithm()
result = myThread.dbi.processData("SELECT * FROM wmbs_job")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_jobgroup")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_fileset")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_file_details")[0].fetchall()
self.assertEqual(len(result), 0)
# Make sure we deleted the directory
self.assertFalse(os.path.exists(cachePath))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'workloadTest/TestWorkload')))
testWMBSFileset = Fileset(id = 1)
self.assertEqual(testWMBSFileset.exists(), False)
workloadSummary = workdatabase.document(id = "TestWorkload")
# Check ACDC
self.assertEqual(workloadSummary['ACDCServer'], sanitizeURL(config.ACDC.couchurl)['url'])
# Check the output
self.assertEqual(workloadSummary['output'].keys(), ['/Electron/MorePenguins-v0/RECO'])
self.assertEqual(sorted(workloadSummary['output']['/Electron/MorePenguins-v0/RECO']['tasks']),
['/TestWorkload/ReReco', '/TestWorkload/ReReco/LogCollect'])
# Check performance
# Check histograms
self.assertAlmostEquals(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['AvgEventTime']['histogram'][0]['average'],
0.89405199999999996, places = 2)
self.assertEqual(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['AvgEventTime']['histogram'][0]['nEvents'],
#.........这里部分代码省略.........
示例9: testA_BasicFunctionTest
def testA_BasicFunctionTest(self):
"""
_BasicFunctionTest_
Tests the components, by seeing if they can process a simple set of closeouts
"""
myThread = threading.currentThread()
config = self.getConfig()
workloadPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
workload = self.createWorkload(workloadName = workloadPath)
testJobGroup = self.createTestJobGroup(config = config,
name = workload.name(),
specLocation = workloadPath,
error = False)
# Create second workload
testJobGroup2 = self.createTestJobGroup(config = config,
name = workload.name(),
filesetName = "TestFileset_2",
specLocation = workloadPath,
task = "/TestWorkload/ReReco/LogCollect")
cachePath = os.path.join(config.JobCreator.jobCacheDir,
"TestWorkload", "ReReco")
os.makedirs(cachePath)
self.assertTrue(os.path.exists(cachePath))
cachePath2 = os.path.join(config.JobCreator.jobCacheDir,
"TestWorkload", "LogCollect")
os.makedirs(cachePath2)
self.assertTrue(os.path.exists(cachePath2))
result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
self.assertEqual(len(result), 2)
workflowName = "TestWorkload"
dbname = config.TaskArchiver.workloadSummaryCouchDBName
couchdb = CouchServer(config.JobStateMachine.couchurl)
workdatabase = couchdb.connectDatabase(dbname)
jobdb = couchdb.connectDatabase("%s/jobs" % self.databaseName)
fwjrdb = couchdb.connectDatabase("%s/fwjrs" % self.databaseName)
jobs = jobdb.loadView("JobDump", "jobsByWorkflowName",
options = {"startkey": [workflowName],
"endkey": [workflowName, {}]})['rows']
self.assertEqual(len(jobs), 2*self.nJobs)
from WMCore.WMBS.CreateWMBSBase import CreateWMBSBase
create = CreateWMBSBase()
tables = []
for x in create.requiredTables:
tables.append(x[2:])
testTaskArchiver = TaskArchiverPoller(config = config)
testTaskArchiver.algorithm()
result = myThread.dbi.processData("SELECT * FROM wmbs_job")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_subscription")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_jobgroup")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_fileset")[0].fetchall()
self.assertEqual(len(result), 0)
result = myThread.dbi.processData("SELECT * FROM wmbs_file_details")[0].fetchall()
self.assertEqual(len(result), 0)
# Make sure we deleted the directory
self.assertFalse(os.path.exists(cachePath))
self.assertFalse(os.path.exists(os.path.join(self.testDir, 'workloadTest/TestWorkload')))
testWMBSFileset = Fileset(id = 1)
self.assertEqual(testWMBSFileset.exists(), False)
workloadSummary = workdatabase.document(id = "TestWorkload")
# Check ACDC
self.assertEqual(workloadSummary['ACDCServer'], sanitizeURL(config.ACDC.couchurl)['url'])
# Check the output
self.assertEqual(workloadSummary['output'].keys(), ['/Electron/MorePenguins-v0/RECO',
'/Electron/MorePenguins-v0/ALCARECO'])
# Check performance
# Check histograms
self.assertAlmostEquals(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['AvgEventTime']['histogram'][0]['average'],
0.062651899999999996, places = 2)
self.assertEqual(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['AvgEventTime']['histogram'][0]['nEvents'],
5)
# Check standard performance
self.assertAlmostEquals(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['TotalJobCPU']['average'], 9.4950600000000005,
places = 2)
self.assertAlmostEquals(workloadSummary['performance']['/TestWorkload/ReReco']['cmsRun1']['TotalJobCPU']['stdDev'], 8.2912400000000002,
places = 2)
# Check worstOffenders
#.........这里部分代码省略.........
示例10: testB_testErrors
def testB_testErrors(self):
    """
    _testErrors_

    Run the TaskArchiverPoller over a job group containing a failed FWJR
    and verify that the workload summary document records the failure:
    failure time, error code '99999', lumi info, and the per-site
    failure/error histograms.
    """
    myThread = threading.currentThread()
    config = self.getConfig()
    workloadPath = os.path.join(self.testDir, "specDir", "spec.pkl")
    workload = self.createWorkload(workloadName=workloadPath)
    testJobGroup = self.createTestJobGroup(
        config=config, name=workload.name(), specLocation=workloadPath, error=True
    )

    cachePath = os.path.join(config.JobCreator.jobCacheDir, "TestWorkload", "ReReco")
    os.makedirs(cachePath)
    self.assertTrue(os.path.exists(cachePath))

    couchdb = CouchServer(config.JobStateMachine.couchurl)
    jobdb = couchdb.connectDatabase("%s/jobs" % self.databaseName)
    fwjrdb = couchdb.connectDatabase("%s/fwjrs" % self.databaseName)
    # prime the JobDump/FWJRDump views so the poller sees the documents
    jobdb.loadView(
        "JobDump", "jobsByWorkflowName",
        options={"startkey": [workload.name()], "endkey": [workload.name(), {}]}
    )["rows"]
    fwjrdb.loadView(
        "FWJRDump", "fwjrsByWorkflowName",
        options={"startkey": [workload.name()], "endkey": [workload.name(), {}]}
    )["rows"]

    testTaskArchiver = TaskArchiverPoller(config=config)
    testTaskArchiver.algorithm()

    dbname = getattr(config.JobStateMachine, "couchDBName")
    workdatabase = couchdb.connectDatabase("%s/workloadsummary" % dbname)
    workloadSummary = workdatabase.document(id=workload.name())

    self.assertEqual(workloadSummary["errors"]["/TestWorkload/ReReco"]["failureTime"], 500)
    # 'in' instead of dict.has_key(), which was removed in Python 3
    self.assertTrue("99999" in workloadSummary["errors"]["/TestWorkload/ReReco"]["cmsRun1"])
    # assertEqual instead of the deprecated assertEquals alias
    self.assertEqual(
        workloadSummary["errors"]["/TestWorkload/ReReco"]["cmsRun1"]["99999"]["runs"],
        {"10": [12312]},
        "Wrong lumi information in the summary for failed jobs",
    )

    # Check the failures-by-site histograms (hoist the deep lookups once)
    workflowFailures = workloadSummary["histograms"]["workflowLevel"]["failuresBySite"]
    stepErrors = workloadSummary["histograms"]["stepLevel"]["/TestWorkload/ReReco"]["cmsRun1"]["errorsBySite"]
    self.assertEqual(workflowFailures["data"]["T1_IT_CNAF"]["Failed Jobs"], 10)
    self.assertEqual(stepErrors["data"]["T1_IT_CNAF"]["99999"], 10)
    self.assertEqual(stepErrors["data"]["T1_IT_CNAF"]["8020"], 10)
    self.assertEqual(workflowFailures["average"]["Failed Jobs"], 10)
    self.assertEqual(stepErrors["average"]["99999"], 10)
    self.assertEqual(stepErrors["average"]["8020"], 10)
    self.assertEqual(workflowFailures["stdDev"]["Failed Jobs"], 0)
    self.assertEqual(stepErrors["stdDev"]["99999"], 0)
    self.assertEqual(stepErrors["stdDev"]["8020"], 0)
    return
示例11: testB_testErrors
def testB_testErrors(self):
    """
    _testErrors_

    Exercise the TaskArchiverPoller plus CleanCouchPoller over a job
    group containing a failed FWJR and check the resulting workload
    summary: failure time, error code '99999', lumi ranges and the
    per-site failure/error histograms.
    """
    config = self.getConfig()
    specPath = os.path.join(self.testDir, 'specDir', 'spec.pkl')
    workload = self.createWorkload(workloadName=specPath)
    failedGroup = self.createTestJobGroup(config=config,
                                          name=workload.name(),
                                          specLocation=specPath,
                                          error=True)
    # Create second workload
    logCollectGroup = self.createTestJobGroup(config=config,
                                              name=workload.name(),
                                              filesetName="TestFileset_2",
                                              specLocation=specPath,
                                              task="/TestWorkload/ReReco/LogCollect",
                                              jobType="LogCollect")

    jobCacheDir = os.path.join(config.JobCreator.jobCacheDir,
                               "TestWorkload", "ReReco")
    os.makedirs(jobCacheDir)
    self.assertTrue(os.path.exists(jobCacheDir))

    server = CouchServer(config.JobStateMachine.couchurl)
    jobDb = server.connectDatabase("%s/jobs" % self.databaseName)
    fwjrDb = server.connectDatabase("%s/fwjrs" % self.databaseName)
    jobDb.loadView("JobDump", "jobsByWorkflowName",
                   options={"startkey": [workload.name()],
                            "endkey": [workload.name(), {}]})['rows']
    fwjrDb.loadView("FWJRDump", "fwjrsByWorkflowName",
                    options={"startkey": [workload.name()],
                             "endkey": [workload.name(), {}]})['rows']

    self.populateWorkflowWithCompleteStatus()
    archiver = TaskArchiverPoller(config=config)
    archiver.algorithm()

    couchCleaner = CleanCouchPoller(config=config)
    couchCleaner.setup()
    couchCleaner.algorithm()

    summaryDbName = getattr(config.JobStateMachine, "couchDBName")
    summaryDb = server.connectDatabase("%s/workloadsummary" % summaryDbName)
    summary = summaryDb.document(id=workload.name())

    reRecoErrors = summary['errors']['/TestWorkload/ReReco']
    self.assertEqual(reRecoErrors['failureTime'], 500)
    self.assertTrue('99999' in reRecoErrors['cmsRun1'])
    failedRunInfo = reRecoErrors['cmsRun1']['99999']['runs']
    self.assertEqual(failedRunInfo, {'10': [[12312, 12312]]},
                     "Wrong lumi information in the summary for failed jobs")

    # Check the failures by site histograms
    siteFailures = summary['histograms']['workflowLevel']['failuresBySite']
    stepErrors = summary['histograms']['stepLevel']['/TestWorkload/ReReco']['cmsRun1']['errorsBySite']
    self.assertEqual(siteFailures['data']['T1_IT_CNAF']['Failed Jobs'], 10)
    self.assertEqual(stepErrors['data']['T1_IT_CNAF']['99999'], 10)
    self.assertEqual(stepErrors['data']['T1_IT_CNAF']['8020'], 10)
    self.assertEqual(siteFailures['average']['Failed Jobs'], 10)
    self.assertEqual(stepErrors['average']['99999'], 10)
    self.assertEqual(stepErrors['average']['8020'], 10)
    self.assertEqual(siteFailures['stdDev']['Failed Jobs'], 0)
    self.assertEqual(stepErrors['stdDev']['99999'], 0)
    self.assertEqual(stepErrors['stdDev']['8020'], 0)
    return