本文整理汇总了Python中WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller.algorithm方法的典型用法代码示例。如果您正苦于以下问题:Python JobSubmitterPoller.algorithm方法的具体用法?Python JobSubmitterPoller.algorithm怎么用?Python JobSubmitterPoller.algorithm使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller
的用法示例。
在下文中一共展示了JobSubmitterPoller.algorithm方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testF_OverloadTest
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testF_OverloadTest(self):
    """
    _OverloadTest_

    Submit more jobs than the sites can handle: each site is limited to a
    single slot for the 'Silly' task type, so only one job per site should
    reach 'Executing' while the remainder stay in 'Created'.
    """
    resourceControl = ResourceControl()
    for site in self.sites:
        # One slot per site guarantees the submitter is overloaded
        resourceControl.insertThreshold(siteName=site, taskType="Silly", maxSlots=1)

    # Pre-condition: the condor queue for this user must be empty
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    workloadName = "basicWorkload"
    workload = self.createTestWorkload()
    config = self.getConfig()
    changeState = ChangeState(config)

    nSubs = 2
    nJobs = 10

    jobGroupList = self.createJobGroups(
        nSubs=nSubs,
        nJobs=nJobs,
        task=workload.getTask("ReReco"),
        workloadSpec=os.path.join(self.testDir, "workloadTest", workloadName),
        type="Silly",
    )
    for group in jobGroupList:
        changeState.propagate(group.jobs, "created", "new")

    jobSubmitter = JobSubmitterPoller(config=config)
    # Actually run it
    jobSubmitter.algorithm()

    # Should be one job for each site
    nSites = len(self.sites)
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSites)

    getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
    result = getJobsAction.execute(state="Executing", jobType="Silly")
    self.assertEqual(len(result), nSites)
    # Everything beyond the per-site slot stays in 'Created'
    result = getJobsAction.execute(state="Created", jobType="Silly")
    self.assertEqual(len(result), nJobs * nSubs - nSites)

    # Now clean-up: flush the submitted jobs out of the condor queue
    command = ["condor_rm", self.user]
    pipe = Popen(command, stdout=PIPE, stderr=PIPE, shell=False)
    pipe.communicate()

    del jobSubmitter
    return
示例2: testD_SubmitFailed
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testD_SubmitFailed(self):
    """
    _testD_SubmitFailed_

    Check if jobs without a possible site to run at go to SubmitFailed
    """
    workloadName = "basicWorkload"
    workload = self.createTestWorkload()
    config = self.getConfig()
    stateChanger = ChangeState(config)

    nSubs, nJobs = 2, 10
    specPath = os.path.join(self.testDir, 'workloadTest', workloadName)

    # An empty site list means there is nowhere these jobs could run
    groups = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                  task=workload.getTask("ReReco"),
                                  site=[],
                                  workloadSpec=specPath)
    for jobGroup in groups:
        stateChanger.propagate(jobGroup.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Jobs should go to submit failed
    allJobs = self.daoFactory(classname="Jobs.GetAllJobs")
    failed = allJobs.execute(state='SubmitFailed', jobType="Processing")
    self.assertEqual(len(failed), nSubs * nJobs)
    return
示例3: testF_WMSMode
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testF_WMSMode(self):
    """
    _WMSMode_

    Try running things in WMS Mode: submit with the CondorPlugin in WMS
    mode, then exercise BossAirAPI.updateSiteInformation on the idle jobs.
    """
    # Pre-condition: the condor queue for this user must be empty
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    config.BossAir.submitWMSMode = True

    baAPI = BossAirAPI(config=config)
    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    nSubs = 5
    nJobs = 10

    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site=None)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Everything should have been handed to condor
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')

    sn = "T2_US_UCSD"

    # Test the Site Info has been updated. Make Sure T2_US_UCSD is not in the sitelist
    # in BossAir_t.py
    baAPI.updateSiteInformation(idleJobs, sn, True)

    # Now kill 'em manually
    # command = ['condor_rm', self.user]
    # pipe = Popen(command, stdout = PIPE, stderr = PIPE, shell = False)
    # pipe.communicate()

    del jobSubmitter
    return
示例4: testF_WMSMode
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testF_WMSMode(self):
    """
    _WMSMode_

    Try running things in WMS Mode: submit with the PyCondorPlugin in WMS
    mode, then track and kill the idle jobs through BossAirAPI.
    """
    # Pre-condition: the condor queue for this user must be empty
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'PyCondorPlugin'
    config.BossAir.submitWMSMode = True

    baAPI = BossAirAPI(config=config)
    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    nSubs = 5
    nJobs = 10

    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site=None)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Everything should have been handed to condor
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    # Clean up through BossAir rather than condor_rm
    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')
    baAPI.kill(jobs=idleJobs)

    del jobSubmitter
    return
示例5: testF_WMSMode
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testF_WMSMode(self):
    """
    _WMSMode_

    Try running things in WMS Mode: submit with the CondorPlugin in WMS
    mode and verify every job reaches the condor queue.
    """
    # Pre-condition: the condor queue for this user must be empty
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    config.BossAir.submitWMSMode = True

    baAPI = BossAirAPI(config=config)
    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    nSubs = 5
    nJobs = 10

    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site=None)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Everything should have been handed to condor
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    # Now kill 'em manually
    command = ['condor_rm', self.user]
    pipe = Popen(command, stdout=PIPE, stderr=PIPE, shell=False)
    pipe.communicate()

    del jobSubmitter
    return
示例6: testT_updateJobInfo
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testT_updateJobInfo(self):
    """
    _updateJobInfo_

    Test the updateSiteInformation method from CondorPlugin.py
    """
    # Pre-condition: the condor queue for this user must be empty
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    config.BossAir.submitWMSMode = True

    baAPI = BossAirAPI(config=config)
    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    nSubs = 1
    nJobs = 2

    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site="se.T2_US_UCSD")
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')

    ##
    # Make one of the sites in the sitelist to be True for ABORTED/DRAINING/DOWN
    # updateSiteInformation() method should edit the classAd for all the jobs
    # that are bound for the site
    # Check the Q manually using condor_q -l <job id>
    #
    jtok = baAPI.updateSiteInformation(idleJobs, "T2_US_UCSD", True)
    # 'is not None' replaces the non-idiomatic '!= None' comparison
    if jtok is not None:
        baAPI.kill(jtok, errorCode=71301)  # errorCode can be either 71301/71302/71303 (Aborted/Draining/Down)
    return
示例7: testJobSiteDrain
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testJobSiteDrain(self):
    """
    _testJobSiteDrain_

    Test the behavior of jobs pending to a single site that is in drain mode
    """
    workload = self.createTestWorkload()
    config = self.getConfig()
    myResourceControl = ResourceControl(config)
    changeState = ChangeState(config)
    getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")

    nSubs = 1
    nJobs = 30
    site = 'T2_US_Nebraska'

    self.setResourceThresholds(site, pendingSlots=100, runningSlots=100,
                               tasks=['Processing', 'Merge'],
                               Processing={'pendingSlots': 10, 'runningSlots': 10},
                               Merge={'pendingSlots': 10, 'runningSlots': 10, 'priority': 5})

    # Initialize the submitter only AFTER the site thresholds are set;
    # sibling tests in this suite note the reverse order is flaky.
    jobSubmitter = JobSubmitterPoller(config=config)

    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        site=[site],
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=self.workloadSpecPath)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    # submit first 10 jobs (Processing pending threshold is 10)
    jobSubmitter.algorithm()

    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), 10)

    myResourceControl.changeSiteState(site, 'Draining')

    # site is now in drain, so don't submit anything
    jobSubmitter.algorithm()

    # jobs were supposed to get killed, but I guess the MockPlugin doesnt do anything
    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), 10)
    result = getJobsAction.execute(state='created', jobType="Processing")
    self.assertEqual(len(result), 20)
    result = getJobsAction.execute(state='submitfailed', jobType="Processing")
    self.assertEqual(len(result), 0)

    # make sure the drain grace period expires...
    time.sleep(3)
    jobSubmitter.algorithm()

    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), 10)
    # the remaining jobs should have gone to submitfailed by now
    result = getJobsAction.execute(state='submitfailed', jobType="Processing")
    self.assertEqual(len(result), 20)
    result = getJobsAction.execute(state='created', jobType="Processing")
    self.assertEqual(len(result), 0)
示例8: testA_BasicTest
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testA_BasicTest(self):
    """
    Use the CondorGlobusPlugin to create a very simple test
    Check to see that all the jobs were submitted
    Parse and test the JDL files
    See what condor says
    """
    workloadName = "basicWorkload"
    workload = self.createTestWorkload()
    config = self.getConfig()
    changeState = ChangeState(config)

    nSubs = 1
    nJobs = 10
    cacheDir = os.path.join(self.testDir, "CacheDir")

    jobGroupList = self.createJobGroups(
        nSubs=nSubs,
        nJobs=nJobs,
        task=workload.getTask("ReReco"),
        workloadSpec=os.path.join(self.testDir, "workloadTest", workloadName),
        site="se.T2_US_UCSD",
    )
    for group in jobGroupList:
        changeState.propagate(group.jobs, "created", "new")

    # Do pre-submit check
    getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
    result = getJobsAction.execute(state="Created", jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Check that jobs are in the right state
    result = getJobsAction.execute(state="Created", jobType="Processing")
    self.assertEqual(len(result), 0)
    result = getJobsAction.execute(state="Executing", jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    # Check assigned locations ('jobId' avoids shadowing the builtin 'id')
    getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
    for jobId in result:
        loc = getLocationAction.execute(jobid=jobId)
        self.assertEqual(loc, [["T2_US_UCSD"]])

    # Check on the JDL ('fileName' avoids shadowing the builtin 'file')
    submitFile = None
    for fileName in os.listdir(config.JobSubmitter.submitDir):
        if re.search("submit", fileName):
            submitFile = fileName
    self.assertTrue(submitFile is not None)
    self.checkJDL(config=config, cacheDir=cacheDir, submitFile=submitFile, site="T2_US_UCSD")

    # Check to make sure we have running jobs
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nJobs * nSubs)

    # This should do nothing
    jobGroupList = self.createJobGroups(
        nSubs=nSubs,
        nJobs=nJobs,
        task=workload.getTask("ReReco"),
        workloadSpec=os.path.join(self.testDir, "workloadTest", workloadName),
        site="se.T2_US_UCSD",
    )
    for group in jobGroupList:
        changeState.propagate(group.jobs, "created", "new")
    jobSubmitter.algorithm()

    # Now clean-up: flush the submitted jobs out of the condor queue
    command = ["condor_rm", self.user]
    pipe = Popen(command, stdout=PIPE, stderr=PIPE, shell=False)
    pipe.communicate()

    del jobSubmitter
    return
示例9: testA_StraightThrough
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testA_StraightThrough(self):
"""
_StraightThrough_
Just run everything straight through without any variations
"""
# Do pre-submit job check
nRunning = getCondorRunningJobs()
self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))
myThread = threading.currentThread()
workload = self.createTestWorkload()
config = self.getConfig()
name = 'WMAgent_Test1'
site = self.sites[0]
nSubs = 5
nFiles = 10
workloadPath = os.path.join(self.testDir, 'workloadTest',
'TestWorkload', 'WMSandbox',
'WMWorkload.pkl')
# Create a collection of files
self.createFileCollection(name = name, nSubs = nSubs,
nFiles = nFiles,
workflowURL = workloadPath,
site = site)
############################################################
# Test the JobCreator
config.Agent.componentName = 'JobCreator'
testJobCreator = JobCreatorPoller(config = config)
testJobCreator.algorithm()
time.sleep(5)
# Did all jobs get created?
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), nSubs*nFiles)
# Count database objects
result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
self.assertEqual(len(result), nSubs * nFiles)
# Find the test directory
testDirectory = os.path.join(self.testDir, 'TestWorkload', 'ReReco')
self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
self.assertTrue(len(os.listdir(testDirectory)) <= 20)
groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')
# First job should be in here
self.assertTrue('job_1' in os.listdir(groupDirectory))
jobFile = os.path.join(groupDirectory, 'job_1', 'job.pkl')
self.assertTrue(os.path.isfile(jobFile))
f = open(jobFile, 'r')
job = cPickle.load(f)
f.close()
self.assertEqual(job['workflow'], name)
self.assertEqual(len(job['input_files']), 1)
self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')
###############################################################
# Now test the JobSubmitter
config.Agent.componentName = 'JobSubmitter'
testJobSubmitter = JobSubmitterPoller(config = config)
testJobSubmitter.algorithm()
# Check that jobs are in the right state
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nFiles)
#.........这里部分代码省略.........
示例10: testE_SiteModesTest
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testE_SiteModesTest(self):
    """
    _testE_SiteModesTest_

    Test the behavior of the submitter in response to the different
    states of the sites
    """
    workloadName = "basicWorkload"
    workload = self.createTestWorkload()
    config = self.getConfig()
    stateChanger = ChangeState(config)
    nSubs = 1
    nJobs = 20

    sites = ['T2_US_Florida', 'T2_TW_Taiwan', 'T3_CO_Uniandes', 'T1_US_FNAL']
    for siteName in sites:
        self.setResourceThresholds(siteName, pendingSlots=10, runningSlots=-1,
                                   tasks=['Processing', 'Merge'],
                                   Processing={'pendingSlots': 10, 'runningSlots': -1},
                                   Merge={'pendingSlots': 10, 'runningSlots': -1, 'priority': 5})

    myResourceControl = ResourceControl()
    myResourceControl.changeSiteState('T2_US_Florida', 'Draining')

    # First test that we prefer Normal over drain, and T1 over T2/T3
    specPath = os.path.join(self.testDir, 'workloadTest', workloadName)
    seNames = ['se.%s' % x for x in sites]
    groups = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                  site=seNames,
                                  task=workload.getTask("ReReco"),
                                  workloadSpec=specPath)
    for group in groups:
        stateChanger.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    # Actually run it
    jobSubmitter.algorithm()

    getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    # All jobs should be at either FNAL, Taiwan or Uniandes. It's a random
    # selection, so just verify none of them landed at the draining site.
    getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
    locationDict = getLocationAction.execute([{'jobid': x} for x in result])
    for entry in locationDict:
        self.assertNotEqual(entry['site_name'], 'T2_US_Florida')

    # Now set everything to down, check we don't submit anything
    for siteName in sites:
        myResourceControl.changeSiteState(siteName, 'Down')
    groups = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                  site=seNames,
                                  task=workload.getTask("ReReco"),
                                  workloadSpec=specPath)
    for group in groups:
        stateChanger.propagate(group.jobs, 'created', 'new')
    jobSubmitter.algorithm()

    # Nothing is submitted despite the empty slots at Uniandes and Florida
    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    # Now set everything to Aborted, and create Merge jobs. Those should fail
    # since they can only run at one place
    for siteName in sites:
        myResourceControl.changeSiteState(siteName, 'Aborted')

    nSubsMerge = 1
    nJobsMerge = 5
    groups = self.createJobGroups(nSubs=nSubsMerge, nJobs=nJobsMerge,
                                  site=seNames,
                                  task=workload.getTask("ReReco"),
                                  workloadSpec=specPath,
                                  taskType='Merge')
    for group in groups:
        stateChanger.propagate(group.jobs, 'created', 'new')
    jobSubmitter.algorithm()

    result = getJobsAction.execute(state='SubmitFailed', jobType='Merge')
    self.assertEqual(len(result), nSubsMerge * nJobsMerge)
    result = getJobsAction.execute(state='Executing', jobType='Processing')
    self.assertEqual(len(result), nSubs * nJobs)
    return
示例11: testD_PrototypeChain
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testD_PrototypeChain(self):
"""
_PrototypeChain_
Prototype the BossAir workflow
"""
myThread = threading.currentThread()
nRunning = getCondorRunningJobs(self.user)
self.assertEqual(nRunning, 0, "User currently has %i running jobs. Test will not continue" % (nRunning))
config = self.getConfig()
config.BossAir.pluginName = 'CondorPlugin'
baAPI = BossAirAPI(config = config)
workload = self.createTestWorkload()
workloadName = "basicWorkload"
changeState = ChangeState(config)
nSubs = 5
nJobs = 10
cacheDir = os.path.join(self.testDir, 'CacheDir')
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir,
'workloadTest',
workloadName),
site = 'se.T2_US_UCSD')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter = JobSubmitterPoller(config = config)
jobTracker = JobTrackerPoller(config = config)
statusPoller = StatusPoller(config = config)
jobSubmitter.algorithm()
nRunning = getCondorRunningJobs(self.user)
self.assertEqual(nRunning, nSubs * nJobs)
newJobs = baAPI._loadByStatus(status = 'New')
self.assertEqual(len(newJobs), nSubs * nJobs)
# Check WMBS
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs)
statusPoller.algorithm()
nRunning = getCondorRunningJobs(self.user)
self.assertEqual(nRunning, nSubs * nJobs)
newJobs = baAPI._loadByStatus(status = 'New')
self.assertEqual(len(newJobs), 0)
newJobs = baAPI._loadByStatus(status = 'Idle')
self.assertEqual(len(newJobs), nSubs * nJobs)
# Tracker should do nothing
jobTracker.algorithm()
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs)
# Wait for jobs to timeout due to short Pending wait period
time.sleep(12)
statusPoller.algorithm()
newJobs = baAPI._loadByStatus(status = 'Idle')
self.assertEqual(len(newJobs), 0)
newJobs = baAPI._loadByStatus(status = 'Timeout', complete = '0')
self.assertEqual(len(newJobs), nSubs * nJobs)
# Jobs should be gone
nRunning = getCondorRunningJobs(self.user)
self.assertEqual(nRunning, 0)
# Check if they're complete
completeJobs = baAPI.getComplete()
self.assertEqual(len(completeJobs), nSubs * nJobs)
# Because they timed out, they all should have failed
jobTracker.algorithm()
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
#.........这里部分代码省略.........
示例12: testC_prioritization
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testC_prioritization(self):
"""
_testC_prioritization_
Check that jobs are prioritized by job type and by oldest workflow
"""
workloadName = "basicWorkload"
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 1
nJobs = 10
sites = ['T1_US_FNAL']
for site in sites:
self.setResourceThresholds(site, pendingSlots = 10, runningSlots = -1, tasks = ['Processing', 'Merge'],
Processing = {'pendingSlots' : 50, 'runningSlots' :-1},
Merge = {'pendingSlots' : 10, 'runningSlots' :-1, 'priority' : 5})
# Always initialize the submitter after setting the sites, flaky!
jobSubmitter = JobSubmitterPoller(config = config)
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL',
name = 'OldestWorkflow')
jobGroupList.extend(self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL',
taskType = 'Merge'))
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
# Merge goes first
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Created', jobType = "Merge")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state = 'Executing', jobType = "Merge")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), 0)
# Create a newer workflow processing, and after some new jobs for an old workflow
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL',
name = 'NewestWorkflow')
jobGroupList.extend(self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL',
name = 'OldestWorkflow'))
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Move pending jobs to running
getRunJobID = self.baDaoFactory(classname = "LoadByWMBSID")
setRunJobStatus = self.baDaoFactory(classname = "SetStatus")
for idx in range(2):
result = getJobsAction.execute(state = 'Executing')
binds = []
for jobId in result:
binds.append({'id' : jobId, 'retry_count' : 0})
runJobIds = getRunJobID.execute(binds)
setRunJobStatus.execute([x['id'] for x in runJobIds], 'Running')
# Run again on created workflows
jobSubmitter.algorithm()
result = getJobsAction.execute(state = 'Created', jobType = "Merge")
self.assertEqual(len(result), 0)
result = getJobsAction.execute(state = 'Executing', jobType = "Merge")
self.assertEqual(len(result), 10)
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 30 - (idx + 1) * 10)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), (idx + 1) * 10)
# Check that older workflow goes first even with newer jobs
getWorkflowAction = self.daoFactory(classname = "Jobs.GetWorkflowTask")
workflows = getWorkflowAction.execute(result)
for workflow in workflows:
self.assertEqual(workflow['name'], 'OldestWorkflow')
#.........这里部分代码省略.........
示例13: testD_WhiteListBlackList
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testD_WhiteListBlackList(self):
"""
_testD_WhiteListBlackList_
Test the whitelist/blacklist implementation
Trust the jobCreator to get this in the job right
"""
workloadName = "basicWorkload"
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 2
nJobs = 10
sites = ['T2_US_Florida', 'T2_TW_Taiwan', 'T2_CH_CERN', 'T3_CO_Uniandes']
for site in sites:
self.setResourceThresholds(site, pendingSlots = 1000, runningSlots = -1, tasks = ['Processing', 'Merge'],
Processing = {'pendingSlots' : 5000, 'runningSlots' :-1},
Merge = {'pendingSlots' : 1000, 'runningSlots' :-1, 'priority' : 5})
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
site = 'se.%s' % sites[-1],
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir,
'workloadTest',
workloadName),
bl = sites[:-1])
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter = JobSubmitterPoller(config = config)
# Actually run it
jobSubmitter.algorithm()
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs)
# All jobs should be at T3_CO_Uniandes
# Check assigned locations
getLocationAction = self.daoFactory(classname = "Jobs.GetLocation")
locationDict = getLocationAction.execute([{'jobid' : x} for x in result])
for entry in locationDict:
loc = entry['site_name']
self.assertEqual(loc, 'T3_CO_Uniandes')
# Run again and test the whiteList
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
site = 'se.%s' % 'T2_CH_CERN',
workloadSpec = os.path.join(self.testDir,
'workloadTest',
workloadName),
wl = ['T2_CH_CERN'])
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Run it
jobSubmitter.algorithm()
# You'll have jobs from the previous run still in the database
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs * 2)
# All jobs should be at CERN or Uniandes
locationDict = getLocationAction.execute([{'jobid' : x} for x in result])
for entry in locationDict[nSubs * nJobs:]:
loc = entry['site_name']
self.assertEqual(loc, 'T2_CH_CERN')
# Run again with an invalid whitelist
# After this point, the original two sets of jobs will be executing
# The rest of the jobs should move to submitFailed
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
site = 'se.%s' % 'T2_CH_CERN',
workloadSpec = os.path.join(self.testDir,
'workloadTest',
workloadName),
wl = ['T2_US_Namibia'])
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
# Jobs should be gone
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs * 2)
result = getJobsAction.execute(state = 'SubmitFailed', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs)
# Run again with all sites blacklisted
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
#.........这里部分代码省略.........
示例14: testA_BasicTest
# 需要导入模块: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller [as 别名]
# 或者: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm [as 别名]
def testA_BasicTest(self):
    """
    Use the MockPlugin to create a simple test
    Check to see that all the jobs were "submitted",
    don't care about thresholds
    """
    workloadName = "basicWorkload"
    workload = self.createTestWorkload()
    config = self.getConfig()
    stateChanger = ChangeState(config)

    nSubs = 2
    nJobs = 20
    site = 'T2_US_UCSD'

    self.setResourceThresholds(site, pendingSlots=50, runningSlots=100,
                               tasks=['Processing', 'Merge'],
                               Processing={'pendingSlots': 50, 'runningSlots': 100},
                               Merge={'pendingSlots': 50, 'runningSlots': 100})

    specPath = os.path.join(self.testDir, 'workloadTest', workloadName)
    groups = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                  task=workload.getTask("ReReco"),
                                  workloadSpec=specPath,
                                  site='se.%s' % site)
    for group in groups:
        stateChanger.propagate(group.jobs, 'created', 'new')

    # Do pre-submit check
    getJobsAction = self.daoFactory(classname="Jobs.GetAllJobs")
    result = getJobsAction.execute(state='Created', jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    # Check that jobs are in the right state
    result = getJobsAction.execute(state='Created', jobType="Processing")
    self.assertEqual(len(result), 0)
    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    # Check assigned locations
    getLocationAction = self.daoFactory(classname="Jobs.GetLocation")
    for jobId in result:
        location = getLocationAction.execute(jobid=jobId)
        self.assertEqual(location, [['T2_US_UCSD']])

    # Run another cycle, it shouldn't submit anything. There isn't anything to submit
    jobSubmitter.algorithm()
    result = getJobsAction.execute(state='Created', jobType="Processing")
    self.assertEqual(len(result), 0)
    result = getJobsAction.execute(state='Executing', jobType="Processing")
    self.assertEqual(len(result), nSubs * nJobs)

    nSubs = 1
    nJobs = 10

    # Submit another 10 jobs
    groups = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                  task=workload.getTask("ReReco"),
                                  workloadSpec=specPath,
                                  site='se.%s' % site,
                                  taskType="Merge")
    for group in groups:
        stateChanger.propagate(group.jobs, 'created', 'new')

    # Check that the jobs are available for submission and run another cycle
    result = getJobsAction.execute(state='Created', jobType="Merge")
    self.assertEqual(len(result), nSubs * nJobs)
    jobSubmitter.algorithm()

    # Check that the last 10 jobs were submitted as well.
    result = getJobsAction.execute(state='Created', jobType="Merge")
    self.assertEqual(len(result), 0)
    result = getJobsAction.execute(state='Executing', jobType="Merge")
    self.assertEqual(len(result), nSubs * nJobs)
    return
# Example 15: testB_thresholdTest
# Required import: from WMComponent.JobSubmitter.JobSubmitterPoller import JobSubmitterPoller
# Alternatively: from WMComponent.JobSubmitter.JobSubmitterPoller.JobSubmitterPoller import algorithm
def testB_thresholdTest(self):
"""
_testB_thresholdTest_
Check that the threshold management is working,
this requires checks on pending/running jobs globally
at a site and per task/site
"""
workloadName = "basicWorkload"
workload = self.createTestWorkload()
config = self.getConfig()
changeState = ChangeState(config)
nSubs = 5
nJobs = 10
sites = ['T1_US_FNAL']
for site in sites:
self.setResourceThresholds(site, pendingSlots = 50, runningSlots = 200, tasks = ['Processing', 'Merge'],
Processing = {'pendingSlots' : 45, 'runningSlots' :-1},
Merge = {'pendingSlots' : 10, 'runningSlots' : 20, 'priority' : 5})
# Always initialize the submitter after setting the sites, flaky!
jobSubmitter = JobSubmitterPoller(config = config)
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
# Do pre-submit check
getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), nSubs * nJobs)
jobSubmitter.algorithm()
# Check that jobs are in the right state,
# here we are limited by the pending threshold for the Processing task (45)
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), 45)
# Check assigned locations
getLocationAction = self.daoFactory(classname = "Jobs.GetLocation")
for jobId in result:
loc = getLocationAction.execute(jobid = jobId)
self.assertEqual(loc, [['T1_US_FNAL']])
# Run another cycle, it shouldn't submit anything. Jobs are still in pending
jobSubmitter.algorithm()
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), 45)
# Now put 10 Merge jobs, only 5 can be submitted, there we hit the global pending threshold for the site
nSubs = 1
nJobs = 10
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL',
taskType = 'Merge')
for group in jobGroupList:
changeState.propagate(group.jobs, 'created', 'new')
jobSubmitter.algorithm()
result = getJobsAction.execute(state = 'Created', jobType = "Merge")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state = 'Executing', jobType = "Merge")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state = 'Created', jobType = "Processing")
self.assertEqual(len(result), 5)
result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
self.assertEqual(len(result), 45)
# Now let's test running thresholds
# The scenario will be setup as follows: Move all current jobs as running
# Create 300 Processing jobs and 300 merge jobs
# Run 5 polling cycles, moving all pending jobs to running in between
# Result is, merge is left at 25 running 0 pending and processing is left at 215 running 0 pending
# Processing has 135 jobs in queue and Merge 285
# This tests all threshold dynamics including the prioritization of merge over processing
nSubs = 1
nJobs = 300
jobGroupList = self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
site = 'se.%s' % 'T1_US_FNAL')
jobGroupList.extend(self.createJobGroups(nSubs = nSubs, nJobs = nJobs,
task = workload.getTask("ReReco"),
workloadSpec = os.path.join(self.testDir, 'workloadTest',
workloadName),
#.........这里部分代码省略.........