本文整理汇总了Python中WMCore.BossAir.BossAirAPI.BossAirAPI类的典型用法代码示例。如果您正苦于以下问题:Python BossAirAPI类的具体用法?Python BossAirAPI怎么用?Python BossAirAPI使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了BossAirAPI类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: changeSiteState
def changeSiteState(self, siteName, state):
    """
    _changeSiteState_

    Move a site into one of the possible states and take the matching
    action on executing jobs: BossAir decides which jobs must go, and
    those are killed with an exit code specific to the new site state.
    """
    # Exit code reported for jobs killed because of each site state.
    exitCodeBySiteState = {"Aborted": 71301,
                           "Draining": 71302,
                           "Down": 71303}

    listJobsAction = self.wmbsDAOFactory(classname="Jobs.ListByState")
    runningJobs = listJobsAction.execute(state='executing')
    if runningJobs:
        bossAir = BossAirAPI(self.config, noSetup=True)
        # Third argument flags whether the new state requires action on jobs.
        jobsToKill = bossAir.updateSiteInformation(runningJobs, siteName,
                                                   state in exitCodeBySiteState)
        bossAir.kill(jobsToKill,
                     errorCode=exitCodeBySiteState.get(state, 71300))

    # Only now that jobs were updated by the plugin, we flip the site state.
    setStateAction = self.wmbsDAOFactory(classname="Locations.SetState")
    setStateAction.execute(siteName=siteName, state=state,
                           conn=self.getDBConn(),
                           transaction=self.existingTransaction())
    return
示例2: changeSiteState
def changeSiteState(self, siteName, state):
    """
    _changeSiteState_

    Put a site into one of the possible states; when the new state is
    Aborted, Draining or Down, executing jobs are handed to BossAir and
    killed with a state-specific exit code.
    """
    # Flip the site state first.
    setState = self.wmbsDAOFactory(classname="Locations.SetState")
    setState.execute(siteName=siteName, state=state,
                     conn=self.getDBConn(),
                     transaction=self.existingTransaction())

    listJobs = self.wmbsDAOFactory(classname="Jobs.ListByState")
    runningJobs = listJobs.execute(state='executing')
    if not runningJobs:
        # No executing jobs, nothing left to do.
        return

    bossAir = BossAirAPI(self.config, noSetup=True)
    jobsToKill = bossAir.updateSiteInformation(runningJobs, siteName,
                                               state in ("Aborted", "Draining", "Down"))
    # Exit code reported for the killed jobs, keyed by site state.
    exitCodes = {"Aborted": 71301, "Draining": 71302, "Down": 71303}
    bossAir.kill(jobsToKill, errorCode=exitCodes.get(state, 71300))
    return
示例3: testG_monitoringDAO
def testG_monitoringDAO(self):
    """
    _monitoringDAO_

    Exercise the monitoring DAO: submit a batch of dummy jobs through
    BossAir and check that they all show up as Pending in monitor().
    """
    # Removed an unused `myThread = threading.currentThread()` local.
    config = self.getConfig()
    baAPI = BossAirAPI(config=config, insertStates=True)

    # Create some jobs.  Prior to building the job, each job must have
    # a plugin and user assigned.
    nJobs = 10
    jobDummies = self.createDummyJobs(nJobs=nJobs)
    for job in jobDummies:
        job['plugin'] = 'TestPlugin'
        job['owner'] = 'tapas'
        job['location'] = 'T2_US_UCSD'
        job.save()

    baAPI.submit(jobs=jobDummies)

    results = baAPI.monitor()
    self.assertEqual(results[0]['Pending'], nJobs)
    return
示例4: killWorkflow
def killWorkflow(workflowName, jobCouchConfig, bossAirConfig=None):
    """
    _killWorkflow_

    Kill a workflow that is already executing inside the agent.  This will
    mark all incomplete jobs as failed and files that belong to all
    non-cleanup and non-logcollect subscriptions as failed.  The name of the
    JSM couch database and the URL to the database must be passed in as well
    so the state transitions are logged.  If a BossAir configuration is
    given, executing jobs are also killed on the batch system.
    """
    myThread = threading.currentThread()
    daoFactory = DAOFactory(package="WMCore.WMBS",
                            logger=myThread.logger,
                            dbinterface=myThread.dbi)
    killFilesAction = daoFactory(classname="Subscriptions.KillWorkflow")
    killJobsAction = daoFactory(classname="Jobs.KillWorkflow")

    # Reuse an already-open transaction when one exists; otherwise open our own.
    existingTransaction = False
    if myThread.transaction.conn:
        existingTransaction = True
    else:
        myThread.transaction.begin()

    killFilesAction.execute(workflowName=workflowName,
                            conn=myThread.transaction.conn,
                            transaction=True)
    liveJobs = killJobsAction.execute(workflowName=workflowName,
                                      conn=myThread.transaction.conn,
                                      transaction=True)

    changeState = ChangeState(jobCouchConfig)

    # Deal with any jobs that are running in the batch system;
    # only works if we can start the API.
    if bossAirConfig:
        bossAir = BossAirAPI(config=bossAirConfig, noSetup=True)
        killableJobs = []
        for liveJob in liveJobs:
            if liveJob["state"].lower() == 'executing':
                # Then we need to kill this on the batch system: record the
                # "killed" transition in the JSM first, then queue the job.
                liveWMBSJob = Job(id=liveJob["id"])
                liveWMBSJob.update(liveJob)
                changeState.propagate(liveWMBSJob, "killed", liveJob["state"])
                killableJobs.append(liveJob)
        # Now kill them.
        try:
            bossAir.kill(jobs=killableJobs)
        except BossAirException as ex:
            # Fixed Python-2-only `except X, ex` syntax.  Jobs were not
            # killed on the batch system -- log and continue; we need to
            # kill the jobs in the master and the batch system will have
            # to take care of itself.
            logging.error("Error while trying to kill running jobs in workflow!\n")
            logging.error(str(ex))
            trace = getattr(ex, 'traceback', '')
            logging.error(trace)
    # NOTE(review): the transaction opened above is never committed within
    # this excerpt -- presumably handled past this chunk; verify upstream.
示例5: changeSiteState
def changeSiteState(self, siteName, state):
    """
    _changeSiteState_

    Set a site to one of the possible states; the Aborted state
    additionally kills every executing job assigned to that site.
    """
    setState = self.wmbsDAOFactory(classname="Locations.SetState")
    setState.execute(siteName=siteName, state=state,
                     conn=self.getDBConn(),
                     transaction=self.existingTransaction())

    if state == "Aborted" and self.config:
        # Kill all jobs in the batch system assigned to this site.
        listJobs = self.wmbsDAOFactory(classname="Jobs.ListByStateAndLocation")
        jobIds = listJobs.execute(state='executing', location=siteName)
        BossAirAPI(self.config, noSetup=True).kill(jobIds, errorCode=61301)
    return
示例6: testKillWorkflow
def testKillWorkflow(self):
    """
    _testKillWorkflow_

    Verify that workflow killing works correctly: of the nine run-jobs
    set up for the kill test, exactly one survives the kill.
    """
    bossAir = BossAirAPI(config=self.config, insertStates=True)

    # Create nine jobs.
    self.setupForKillTest(baAPI=bossAir)
    self.assertEqual(len(bossAir._listRunJobs()), 9)

    killWorkflow("Main", self.config, self.config)

    self.verifyFileKillStatus()
    self.verifyJobKillStatus()
    self.assertEqual(len(bossAir._listRunJobs()), 8)
    return
示例7: testF_WMSMode
def testF_WMSMode(self):
    """
    _WMSMode_

    Try running things in WMS Mode: submit 5 subscriptions x 10 jobs
    through the JobSubmitterPoller with the CondorPlugin in WMS mode,
    then track them and push a site-information update for T2_US_UCSD.
    """
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0,
                     "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    config.BossAir.submitWMSMode = True
    baAPI = BossAirAPI(config=config)

    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    # Removed an unused `cacheDir` local.
    nSubs = 5
    nJobs = 10
    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site=None)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')

    sn = "T2_US_UCSD"
    # Test the site info has been updated.  Make sure T2_US_UCSD is NOT
    # in the sitelist in BossAir_t.py.
    baAPI.updateSiteInformation(idleJobs, sn, True)

    # Jobs are left queued; remove them manually with `condor_rm <user>`
    # if needed.
    del jobSubmitter
    return
示例8: testKillWorkflow
def testKillWorkflow(self):
    """
    _testKillWorkflow_

    Verify that workflow killing works correctly when running against
    an emulated WMAgent configuration.
    """
    configFile = EmulatorSetup.setupWMAgentConfig()
    config = loadConfigurationFile(configFile)
    bossAir = BossAirAPI(config=config)

    # Create nine jobs; one survives the kill.
    self.setupForKillTest(baAPI=bossAir)
    self.assertEqual(len(bossAir._listRunJobs()), 9)

    killWorkflow("Main", config, config)

    self.verifyFileKillStatus()
    self.verifyJobKillStatus()
    self.assertEqual(len(bossAir._listRunJobs()), 8)

    EmulatorSetup.deleteConfig(configFile)
    return
示例9: testT_updateJobInfo
def testT_updateJobInfo(self):
    """
    _updateJobInfo_

    Test the updateSiteInformation method from CondorPlugin.py: submit
    jobs bound for T2_US_UCSD, then mark the site as down and kill the
    jobs that BossAir reports as affected.
    """
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0,
                     "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    config.BossAir.submitWMSMode = True
    baAPI = BossAirAPI(config=config)

    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    # Removed an unused `dummycacheDir` local.
    nSubs = 1
    nJobs = 2
    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site="se.T2_US_UCSD")
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')

    # Make one of the sites in the sitelist True for ABORTED/DRAINING/DOWN.
    # updateSiteInformation() should edit the classAd for all the jobs
    # that are bound for the site.
    # Check the queue manually using: condor_q -l <job id>
    jtok = baAPI.updateSiteInformation(idleJobs, "T2_US_UCSD", True)
    if jtok is not None:  # was `jtok != None`; identity test is correct here
        # errorCode can be either 71301/71302/71303 (Aborted/Draining/Down)
        baAPI.kill(jtok, errorCode=71301)
    return
示例10: __init__
def __init__(self, config):
    """
    Initialise class members: the state changer, the BossAir API and the
    WMBS DAOs used to list jobs and record FWJR paths.
    """
    BaseWorkerThread.__init__(self)
    self.config = config

    currentThread = threading.currentThread()
    self.changeState = ChangeState(self.config)
    self.bossAir = BossAirAPI(config=config)
    self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=currentThread.logger,
                                 dbinterface=currentThread.dbi)
    self.jobListAction = self.daoFactory(classname="Jobs.GetAllJobs")
    self.setFWJRAction = self.daoFactory(classname="Jobs.SetFWJRPath")
示例11: __init__
def __init__(self, config):
    """
    Initialise class members: the state changer, the BossAir API, the
    job-listing DAO and the alert framework.
    """
    BaseWorkerThread.__init__(self)
    self.config = config

    currentThread = threading.currentThread()
    self.changeState = ChangeState(self.config)
    self.bossAir = BossAirAPI(config=config)
    self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=currentThread.logger,
                                 dbinterface=currentThread.dbi)
    self.jobListAction = self.daoFactory(classname="Jobs.GetAllJobs")

    # Initialize the alert framework (if available).
    self.initAlerts(compName="JobTracker")
示例12: testF_WMSMode
def testF_WMSMode(self):
    """
    _WMSMode_

    Try running things in WMS Mode: submit 5 subscriptions x 10 jobs
    through the JobSubmitterPoller with the PyCondorPlugin in WMS mode,
    then track the jobs and kill the idle ones.
    """
    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, 0,
                     "User currently has %i running jobs. Test will not continue" % (nRunning))

    config = self.getConfig()
    config.BossAir.pluginName = 'PyCondorPlugin'
    config.BossAir.submitWMSMode = True
    baAPI = BossAirAPI(config=config)

    workload = self.createTestWorkload()
    workloadName = "basicWorkload"
    changeState = ChangeState(config)

    # Removed an unused `cacheDir` local.
    nSubs = 5
    nJobs = 10
    jobGroupList = self.createJobGroups(nSubs=nSubs, nJobs=nJobs,
                                        task=workload.getTask("ReReco"),
                                        workloadSpec=os.path.join(self.testDir,
                                                                  'workloadTest',
                                                                  workloadName),
                                        site=None)
    for group in jobGroupList:
        changeState.propagate(group.jobs, 'created', 'new')

    jobSubmitter = JobSubmitterPoller(config=config)
    jobSubmitter.algorithm()

    nRunning = getCondorRunningJobs(self.user)
    self.assertEqual(nRunning, nSubs * nJobs)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')
    baAPI.kill(jobs=idleJobs)

    del jobSubmitter
    return
示例13: __init__
def __init__(self, config):
    """
    __init__

    Set up the caching and other objects.
    """
    self.config = config
    BaseWorkerThread.__init__(self)

    self.cachedJobs = []
    self.bossAir = BossAirAPI(config=config)

    # With no timeouts, nothing ever happens.  Otherwise we expect a
    # dictionary with the keys representing the states and the values
    # the timeouts.  Plain attribute access: a missing stateTimeouts
    # raises AttributeError, same as the default-less getattr it replaces.
    self.timeouts = config.JobStatusLite.stateTimeouts
    return
示例14: testT_updateJobInfo
def testT_updateJobInfo(self):
    """
    _updateJobInfo_

    Test the updateSiteInformation method from CondorPlugin.py by
    tracking current jobs and pushing a site-info update for the idle
    ones.
    """
    # NOTE(review): the running-job count is fetched but never asserted
    # here -- kept for its query side effect / debugging; confirm intent.
    nRunning = getCondorRunningJobs(self.user)

    config = self.getConfig()
    config.BossAir.pluginName = 'CondorPlugin'
    baAPI = BossAirAPI(config=config)

    baAPI.track()
    idleJobs = baAPI._loadByStatus(status='Idle')
    # Parenthesized print: identical output on Python 2, valid on Python 3
    # (the original used the py2-only `print x` statement form).
    print(idleJobs)
    for job in idleJobs:
        print(job['id'])

    baAPI.updateSiteInformation(idleJobs, info=None)
    return
示例15: __init__
def __init__(self, config):
    """
    __init__

    Set up the JobSubmitter poller: DAOs, BossAir, resource control,
    the submission caches, the package directory on disk and (when
    ReqMgr is used) the aborted/force-complete workflow cache.
    """
    BaseWorkerThread.__init__(self)
    myThread = threading.currentThread()
    self.config = config

    # DAO factory for WMBS objects
    self.daoFactory = DAOFactory(package="WMCore.WMBS", logger=logging,
                                 dbinterface=myThread.dbi)

    # Libraries
    self.resourceControl = ResourceControl()
    self.changeState = ChangeState(self.config)
    self.bossAir = BossAirAPI(config=self.config)

    # Submitter tunables, all overridable from the component config.
    self.repollCount = getattr(self.config.JobSubmitter, 'repollCount', 10000)
    self.maxJobsPerPoll = int(getattr(self.config.JobSubmitter, 'maxJobsPerPoll', 1000))
    self.cacheRefreshSize = int(getattr(self.config.JobSubmitter, 'cacheRefreshSize', 30000))
    self.skipRefreshCount = int(getattr(self.config.JobSubmitter, 'skipRefreshCount', 20))
    self.packageSize = getattr(self.config.JobSubmitter, 'packageSize', 500)
    self.collSize = getattr(self.config.JobSubmitter, 'collectionSize',
                            self.packageSize * 1000)
    self.maxTaskPriority = getattr(self.config.BossAir, 'maxTaskPriority', 1e7)

    # Additions for caching-based JobSubmitter
    self.cachedJobIDs = set()
    self.cachedJobs = {}
    self.jobDataCache = {}
    self.jobsToPackage = {}
    self.sandboxPackage = {}
    self.locationDict = {}
    self.taskTypePrioMap = {}
    self.drainSites = set()
    self.abortSites = set()
    self.refreshPollingCount = 0

    try:
        if not getattr(self.config.JobSubmitter, 'submitDir', None):
            self.config.JobSubmitter.submitDir = self.config.JobSubmitter.componentDir
        self.packageDir = os.path.join(self.config.JobSubmitter.submitDir, 'packages')
        if not os.path.exists(self.packageDir):
            os.makedirs(self.packageDir)
    except OSError as ex:
        # Fill the %s placeholder with the directory we tried to create;
        # the original logged/raised the message with a literal "%s" in it.
        msg = "Error while trying to create packageDir %s\n!" % self.packageDir
        msg += str(ex)
        logging.error(msg)
        logging.debug("PackageDir: %s", self.packageDir)
        logging.debug("Config: %s", config)
        raise JobSubmitterPollerException(msg)

    # Now the DAOs
    self.listJobsAction = self.daoFactory(classname="Jobs.ListForSubmitter")
    self.setLocationAction = self.daoFactory(classname="Jobs.SetLocation")
    self.locationAction = self.daoFactory(classname="Locations.GetSiteInfo")
    self.setFWJRPathAction = self.daoFactory(classname="Jobs.SetFWJRPath")
    self.listWorkflows = self.daoFactory(classname="Workflow.ListForSubmitter")

    # Keep a record of the thresholds in memory
    self.currentRcThresholds = {}

    self.useReqMgrForCompletionCheck = getattr(self.config.TaskArchiver,
                                               'useReqMgrForCompletionCheck', True)
    if self.useReqMgrForCompletionCheck:
        # Only set this up when ReqMgr is used (not Tier0).
        self.reqmgr2Svc = ReqMgr(self.config.TaskArchiver.ReqMgr2ServiceURL)
        self.abortedAndForceCompleteWorkflowCache = \
            self.reqmgr2Svc.getAbortedAndForceCompleteRequestsFromMemoryCache()
    else:
        # Tier0 case -- just for clarity (this shouldn't be used there).
        self.abortedAndForceCompleteWorkflowCache = None
    return