This article collects typical usage examples of the Python class DIRAC.WorkloadManagementSystem.DB.TaskQueueDB.TaskQueueDB. If you are wondering how to use the TaskQueueDB class, or what it looks like in real code, the hand-picked examples below should help.
Fifteen code examples of the TaskQueueDB class are shown below, sorted by popularity by default.
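Before the individual examples, here is a minimal sketch that strings together the main TaskQueueDB calls appearing throughout the examples below (insertJob, matchAndGetJob, deleteJob, retrieveTaskQueues). The job ID, the task queue definition values and the resource description are placeholders borrowed from Example 2, not real production data.
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB

tqDB = TaskQueueDB()
# Insert a (placeholder) job with its task queue definition and a priority of 10
tqDefDict = { 'OwnerDN' : '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser',
              'OwnerGroup' : 'prod', 'Setup' : 'JenkinsSetup', 'CPUTime' : 86400 }
result = tqDB.insertJob( 123, tqDefDict, 10 )
if not result['OK']:
  print result['Message']
# Match a job against a resource description and delete it from the queue once taken
result = tqDB.matchAndGetJob( { 'Setup' : 'JenkinsSetup', 'CPUTime' : 86400 } )
if result['OK'] and result['Value']['matchFound']:
  tqDB.deleteJob( result['Value']['jobId'] )
# Dump the currently defined task queues
print tqDB.retrieveTaskQueues()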
Example 1: initializeMatcherHandler
def initializeMatcherHandler( serviceInfo ):
""" Matcher Service initialization
"""
global gJobDB
global gJobLoggingDB
global gTaskQueueDB
global gPilotAgentsDB
gJobDB = JobDB()
gJobLoggingDB = JobLoggingDB()
gTaskQueueDB = TaskQueueDB()
gPilotAgentsDB = PilotAgentsDB()
gMonitor.registerActivity( 'matchTime', "Job matching time",
'Matching', "secs" , gMonitor.OP_MEAN, 300 )
gMonitor.registerActivity( 'matchesDone', "Job Match Request",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'matchesOK', "Matched jobs",
'Matching', "matches" , gMonitor.OP_RATE, 300 )
gMonitor.registerActivity( 'numTQs', "Number of Task Queues",
'Matching', "tqsk queues" , gMonitor.OP_MEAN, 300 )
gTaskQueueDB.recalculateTQSharesForAll()
gThreadScheduler.addPeriodicTask( 120, gTaskQueueDB.recalculateTQSharesForAll )
gThreadScheduler.addPeriodicTask( 60, sendNumTaskQueues )
sendNumTaskQueues()
return S_OK()
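sendNumTaskQueues() is called but not shown in Example 1. A plausible sketch, assuming TaskQueueDB exposes a getNumTaskQueues() method that returns the count wrapped in S_OK, could look like this (not necessarily the actual handler code):
def sendNumTaskQueues():
  # Report the current number of task queues to the monitoring system (sketch)
  result = gTaskQueueDB.getNumTaskQueues()
  if result['OK']:
    gMonitor.addMark( 'numTQs', result['Value'] )
  else:
    gLogger.error( "Cannot get the number of task queues", result['Message'] )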
Example 2: test_matcher
def test_matcher( self ):
# insert a proper DN to run the test
resourceDescription = {'OwnerGroup': 'prod', 'OwnerDN':'/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser/[email protected]',
'DIRACVersion': 'pippo', 'ReleaseVersion':'blabla', 'VirtualOrganization':'LHCB',
'PilotInfoReportedFlag':'True', 'PilotBenchmark':'anotherPilot', 'LHCbPlatform':'CERTO',
'Site':'DIRAC.Jenkins.org', 'CPUTime' : 86400 }
matcher = RPCClient( 'WorkloadManagement/Matcher' )
JobStateUpdate = RPCClient( 'WorkloadManagement/JobStateUpdate' )
wmsClient = WMSClient()
job = helloWorldJob()
job.setDestination( 'DIRAC.Jenkins.org' )
job.setInputData( '/a/bbb' )
job.setType( 'User' )
jobDescription = createFile( job )
res = wmsClient.submitJob( job._toJDL( xmlFile = jobDescription ) )
self.assert_( res['OK'] )
jobID = res['Value']
res = JobStateUpdate.setJobStatus( jobID, 'Waiting', 'matching', 'source' )
self.assert_( res['OK'] )
tqDB = TaskQueueDB()
tqDefDict = {'OwnerDN': '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser/[email protected]',
'OwnerGroup':'prod', 'Setup':'JenkinsSetup', 'CPUTime':86400}
res = tqDB.insertJob( jobID, tqDefDict, 10 )
self.assert_( res['OK'] )
res = matcher.requestJob( resourceDescription )
print res
self.assert_( res['OK'] )
wmsClient.deleteJob( jobID )
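helloWorldJob() and createFile() come from the DIRAC integration-test utilities and are not shown on this page. The following is only a guess at what such helpers could do; the executable, file names and use of job._toXML() are assumptions:
import os
import tempfile
from DIRAC.Interfaces.API.Job import Job

def helloWorldJob():
  # Build a trivial job that just echoes a string (placeholder executable)
  job = Job()
  job.setName( "helloWorld" )
  job.setExecutable( "/bin/echo", arguments = "Hello World" )
  return job

def createFile( job ):
  # Dump the job description to a temporary XML file and return its path
  tmpdir = tempfile.mkdtemp()
  jobDescription = os.path.join( tmpdir, 'jobDescription.xml' )
  with open( jobDescription, 'w' ) as fd:
    fd.write( job._toXML() )
  return jobDescription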
Example 3: initializeMatcherHandler
def initializeMatcherHandler(serviceInfo):
""" Matcher Service initialization
"""
global jobDB
global jobLoggingDB
global taskQueueDB
jobDB = JobDB()
jobLoggingDB = JobLoggingDB()
taskQueueDB = TaskQueueDB()
gMonitor.registerActivity("matchTime", "Job matching time", "Matching", "secs", gMonitor.OP_MEAN, 300)
gMonitor.registerActivity(
"matchTaskQueues", "Task queues checked per job", "Matching", "task queues", gMonitor.OP_MEAN, 300
)
gMonitor.registerActivity("matchesDone", "Job Matches", "Matching", "matches", gMonitor.OP_MEAN, 300)
gMonitor.registerActivity("numTQs", "Number of Task Queues", "Matching", "tqsk queues", gMonitor.OP_MEAN, 300)
taskQueueDB.recalculateTQSharesForAll()
gThreadScheduler.addPeriodicTask(120, taskQueueDB.recalculateTQSharesForAll)
gThreadScheduler.addPeriodicTask(120, sendNumTaskQueues)
sendNumTaskQueues()
return S_OK()
Example 4: initialize
def initialize( self ):
""" Sets defaults
"""
self.am_setOption( "PollingTime", 120 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
agentTSTypes = self.am_getOption('ProductionTypes', [])
if agentTSTypes:
self.prod_types = agentTSTypes
else:
self.prod_types = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
gLogger.info( "Will exclude the following Production types from cleaning %s" % ( ', '.join( self.prod_types ) ) )
self.maxJobsAtOnce = self.am_getOption( 'MaxJobsAtOnce', 500 )
self.jobByJob = self.am_getOption( 'JobByJob', False )
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod', 0.)
self.removeStatusDelay['Done'] = self.am_getOption( 'RemoveStatusDelay/Done', 7 )
self.removeStatusDelay['Killed'] = self.am_getOption( 'RemoveStatusDelay/Killed', 7 )
self.removeStatusDelay['Failed'] = self.am_getOption( 'RemoveStatusDelay/Failed', 7 )
self.removeStatusDelay['Any'] = self.am_getOption( 'RemoveStatusDelay/Any', -1 )
return S_OK()
Example 5: __init__
def __init__( self, pilotAgentsDB = None, jobDB = None, tqDB = None, jlDB = None, opsHelper = None ):
""" c'tor
"""
if pilotAgentsDB:
self.pilotAgentsDB = pilotAgentsDB
else:
self.pilotAgentsDB = PilotAgentsDB()
if jobDB:
self.jobDB = jobDB
else:
self.jobDB = JobDB()
if tqDB:
self.tqDB = tqDB
else:
self.tqDB = TaskQueueDB()
if jlDB:
self.jlDB = jlDB
else:
self.jlDB = JobLoggingDB()
if opsHelper:
self.opsHelper = opsHelper
else:
self.opsHelper = Operations()
self.log = gLogger.getSubLogger( "Matcher" )
self.limiter = Limiter( jobDB = self.jobDB, opsHelper = self.opsHelper )
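Every dependency of this constructor is optional, so tests can inject pre-built (or mock) DB objects while the remaining ones fall back to real instances. A small hypothetical usage sketch:
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB

jobDB = JobDB()
tqDB = TaskQueueDB()
# pilotAgentsDB, jlDB and opsHelper are created with their defaults
matcher = Matcher( jobDB = jobDB, tqDB = tqDB )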
Example 6: initializeOptimizer
def initializeOptimizer( self ):
"""Initialize specific parameters for TaskQueueAgent.
"""
self.waitingStatus = self.am_getOption( 'WaitingStatus', 'Waiting' )
self.waitingMinorStatus = self.am_getOption( 'WaitingMinorStatus', 'Pilot Agent Submission' )
try:
self.taskQueueDB = TaskQueueDB()
result = self.taskQueueDB.enableAllTaskQueues()
if not result[ 'OK' ]:
raise Exception( "Can't enable TaskQueues: %s" % result[ 'Message' ] )
except Exception, e:
self.log.exception()
return S_ERROR( "Cannot initialize taskqueueDB: %s" % str( e ) )
Example 7: initialize
def initialize( self ):
"""Sets defaults
"""
self.am_setOption( "PollingTime", 60 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
self.prod_types = self.am_getOption('ProductionTypes',['DataReconstruction', 'DataStripping', 'MCSimulation', 'Merge', 'production'])
gLogger.info('Will exclude the following Production types from cleaning %s'%(string.join(self.prod_types,', ')))
self.maxJobsAtOnce = self.am_getOption('MaxJobsAtOnce',200)
self.jobByJob = self.am_getOption('JobByJob',True)
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod',0.)
return S_OK()
Example 8: initialize
def initialize(self):
"""Sets defaults
"""
self.am_setOption("PollingTime", 60)
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
self.prod_types = self.am_getOption(
"ProductionTypes", ["DataReconstruction", "DataStripping", "MCSimulation", "Merge", "production"]
)
gLogger.info(
"Will exclude the following Production types from cleaning %s" % (string.join(self.prod_types, ", "))
)
self.maxJobsAtOnce = self.am_getOption("MaxJobsAtOnce", 200)
self.jobByJob = self.am_getOption("JobByJob", True)
self.throttlingPeriod = self.am_getOption("ThrottlingPeriod", 0.0)
return S_OK()
Example 9: initialize
def initialize( self ):
"""Sets defaults
"""
self.am_setOption( "PollingTime", 60 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
agentTSTypes = self.am_getOption('ProductionTypes', [])
if agentTSTypes:
self.prod_types = agentTSTypes
else:
self.prod_types = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
gLogger.info('Will exclude the following Production types from cleaning %s'%(string.join(self.prod_types,', ')))
self.maxJobsAtOnce = self.am_getOption('MaxJobsAtOnce',100)
self.jobByJob = self.am_getOption('JobByJob',True)
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod',0.)
return S_OK()
Example 10: TaskQueueAgent
class TaskQueueAgent( OptimizerModule ):
"""
The specific Optimizer must provide the following methods:
- initializeOptimizer() before each execution cycle
- checkJob() - the main method called for each job
"""
#############################################################################
def initializeOptimizer( self ):
"""Initialize specific parameters for TaskQueueAgent.
"""
self.waitingStatus = self.am_getOption( 'WaitingStatus', 'Waiting' )
self.waitingMinorStatus = self.am_getOption( 'WaitingMinorStatus', 'Pilot Agent Submission' )
try:
self.taskQueueDB = TaskQueueDB()
result = self.taskQueueDB.enableAllTaskQueues()
if not result[ 'OK' ]:
raise Exception( "Can't enable TaskQueues: %s" % result[ 'Message' ] )
except Exception, e:
self.log.exception()
return S_ERROR( "Cannot initialize taskqueueDB: %s" % str( e ) )
return S_OK()
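The class docstring states that every optimizer must also provide checkJob(); the TaskQueueAgent's real implementation is not shown here. Below is only a hypothetical sketch of such a method, reusing the insertJob( jobID, tqDefDict, priority ) call from Example 2; the ClassAd attribute names and the priority value are assumptions:
  def checkJob( self, job, classAdJob ):
    """ Hypothetical sketch (not the actual DIRAC code): build a task queue
        definition from the job's class ad and insert the job into the TaskQueueDB.
    """
    tqDefDict = { 'OwnerDN' : classAdJob.getAttributeString( 'OwnerDN' ),
                  'OwnerGroup' : classAdJob.getAttributeString( 'OwnerGroup' ),
                  'Setup' : classAdJob.getAttributeString( 'DIRACSetup' ),
                  'CPUTime' : classAdJob.getAttributeInt( 'CPUTime' ) }
    result = self.taskQueueDB.insertJob( job, tqDefDict, 1 )
    if not result['OK']:
      return S_ERROR( "Cannot insert job %s in task queue: %s" % ( job, result['Message'] ) )
    return S_OK()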
Example 11: JobCleaningAgent
class JobCleaningAgent( AgentModule ):
"""
The specific agents must provide the following methods:
- initialize() for initial settings
- beginExecution()
- execute() - the main method called in the agent cycle
- endExecution()
- finalize() - the graceful exit of the method, this one is usually used
for the agent restart
"""
#############################################################################
def initialize( self ):
"""Sets defaults
"""
self.am_setOption( "PollingTime", 60 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
agentTSTypes = self.am_getOption('ProductionTypes', [])
if agentTSTypes:
self.prod_types = agentTSTypes
else:
self.prod_types = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
gLogger.info('Will exclude the following Production types from cleaning %s'%(string.join(self.prod_types,', ')))
self.maxJobsAtOnce = self.am_getOption('MaxJobsAtOnce',100)
self.jobByJob = self.am_getOption('JobByJob',True)
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod',0.)
return S_OK()
def __getAllowedJobTypes( self ):
#Get valid jobTypes
result = self.jobDB.getDistinctJobAttributes( 'JobType' )
if not result[ 'OK' ]:
return result
cleanJobTypes = []
for jobType in result[ 'Value' ]:
if jobType not in self.prod_types:
cleanJobTypes.append( jobType )
self.log.notice( "JobTypes to clean %s" % cleanJobTypes )
return S_OK( cleanJobTypes )
#############################################################################
def execute( self ):
"""The PilotAgent execution method.
"""
#Delete jobs in "Deleted" state
result = self.removeJobsByStatus( { 'Status' : 'Deleted' } )
if not result[ 'OK' ]:
return result
#Get all the Job types that can be cleaned
result = self.__getAllowedJobTypes()
if not result[ 'OK' ]:
return result
baseCond = { 'JobType' : result[ 'Value' ] }
# Remove jobs with final status
for status in REMOVE_STATUS_DELAY:
delay = REMOVE_STATUS_DELAY[ status ]
condDict = dict( baseCond )
condDict[ 'Status' ] = status
delTime = str( Time.dateTime() - delay * Time.day )
result = self.removeJobsByStatus( condDict, delTime )
if not result['OK']:
gLogger.warn( 'Failed to remove jobs in status %s' % status )
return S_OK()
def removeJobsByStatus( self, condDict, delay = False ):
""" Remove deleted jobs
"""
if delay:
gLogger.verbose( "Removing jobs with %s and older than %s" % ( condDict, delay ) )
result = self.jobDB.selectJobs( condDict, older = delay, limit = self.maxJobsAtOnce )
else:
gLogger.verbose( "Removing jobs with %s " % condDict )
result = self.jobDB.selectJobs( condDict, limit = self.maxJobsAtOnce )
if not result['OK']:
return result
jobList = result['Value']
if len(jobList) > self.maxJobsAtOnce:
jobList = jobList[:self.maxJobsAtOnce]
if not jobList:
return S_OK()
self.log.notice( "Deleting %s jobs for %s" % ( len( jobList ), condDict ) )
count = 0
error_count = 0
result = SandboxStoreClient( useCertificates = True ).unassignJobs( jobList )
if not result[ 'OK' ]:
gLogger.warn( "Cannot unassign jobs to sandboxes", result[ 'Message' ] )
result = self.deleteJobOversizedSandbox( jobList )
if not result[ 'OK' ]:
gLogger.warn( "Cannot schedle removal of oversized sandboxes", result[ 'Message' ] )
return result
#......... (the rest of the code is omitted here) .........
Example 12: JobCleaningAgent
class JobCleaningAgent( AgentModule ):
"""
The specific agents must provide the following methods:
* initialize() for initial settings
* beginExecution()
* execute() - the main method called in the agent cycle
* endExecution()
* finalize() - the graceful exit of the method, this one is usually used for the agent restart
"""
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
#clients
# FIXME: shouldn't we avoid using the DBs directly, and instead go through the service?
self.jobDB = None
self.taskQueueDB = None
self.jobLoggingDB = None
self.maxJobsAtOnce = 100
self.jobByJob = False
self.throttlingPeriod = 0.
self.prodTypes = []
self.removeStatusDelay = {}
#############################################################################
def initialize( self ):
""" Sets defaults
"""
self.am_setOption( "PollingTime", 120 )
self.jobDB = JobDB()
self.taskQueueDB = TaskQueueDB()
self.jobLoggingDB = JobLoggingDB()
# self.sandboxDB = SandboxDB( 'SandboxDB' )
agentTSTypes = self.am_getOption('ProductionTypes', [])
if agentTSTypes:
self.prodTypes = agentTSTypes
else:
self.prodTypes = Operations().getValue(
'Transformations/DataProcessing', ['MCSimulation', 'Merge'])
gLogger.info("Will exclude the following Production types from cleaning %s" % (
', '.join(self.prodTypes)))
self.maxJobsAtOnce = self.am_getOption( 'MaxJobsAtOnce', 500 )
self.jobByJob = self.am_getOption( 'JobByJob', False )
self.throttlingPeriod = self.am_getOption('ThrottlingPeriod', 0.)
self.removeStatusDelay['Done'] = self.am_getOption( 'RemoveStatusDelay/Done', 7 )
self.removeStatusDelay['Killed'] = self.am_getOption( 'RemoveStatusDelay/Killed', 7 )
self.removeStatusDelay['Failed'] = self.am_getOption( 'RemoveStatusDelay/Failed', 7 )
self.removeStatusDelay['Any'] = self.am_getOption( 'RemoveStatusDelay/Any', -1 )
return S_OK()
def __getAllowedJobTypes( self ):
""" Get valid jobTypes
"""
result = self.jobDB.getDistinctJobAttributes( 'JobType' )
if not result[ 'OK' ]:
return result
cleanJobTypes = []
for jobType in result[ 'Value' ]:
if jobType not in self.prodTypes:
cleanJobTypes.append( jobType )
self.log.notice( "JobTypes to clean %s" % cleanJobTypes )
return S_OK( cleanJobTypes )
#############################################################################
def execute( self ):
""" Remove jobs in various status
"""
#Delete jobs in "Deleted" state
result = self.removeJobsByStatus( { 'Status' : 'Deleted' } )
if not result[ 'OK' ]:
return result
#Get all the Job types that can be cleaned
result = self.__getAllowedJobTypes()
if not result[ 'OK' ]:
return result
# No jobs in the system subject to removal
if not result['Value']:
return S_OK()
baseCond = { 'JobType' : result[ 'Value' ] }
# Remove jobs with final status
for status in self.removeStatusDelay:
delay = self.removeStatusDelay[ status ]
if delay < 0:
# Negative delay means don't delete anything...
continue
condDict = dict( baseCond )
if status != 'Any':
condDict[ 'Status' ] = status
delTime = str( Time.dateTime() - delay * Time.day )
#......... (the rest of the code is omitted here) .........
Example 13: type
# print db.checkImageStatus( 'name', 'flavor'*10, 'requirements' )
ret = db.insertInstance( 'Image3', 'instance' )
print "insertInstance ", ret
ret = db.insertInstance( 'Image2', 'instance' )
print "insertInstance ", ret
if not ret['OK']:
DIRAC.exit()
print type( ret['Value'] )
print "declareInstanceSubmitted", db.declareInstanceSubmitted( ret['Value'] )
id1 = DIRAC.Time.toString()
print "declareInstanceRunning ", db.declareInstanceRunning( 'Image3', id1, 'IP', 'ip' )
id2 = DIRAC.Time.toString()
print "declareInstanceRunning ", db.declareInstanceRunning( 'Image2', id2, 'IP', 'ip' )
print "declareInstanceRunning ", db.instanceIDHeartBeat( id2, 1.0 )
for status in validStates:
print "get%10sInstances " % status, db.getInstancesByStatus( status )
print "declareInstanceHalting ", db.declareInstanceHalting( id1, 0.0 )
print "declareInstanceHalting ", db.declareInstanceHalting( id2, 0.0 )
print "declareStalledInstances ", db.declareStalledInstances()
print "declareStalledInstances ", db.declareStalledInstances()
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
tq = TaskQueueDB()
print tq.retrieveTaskQueues()
Example 14: JobDB
# File : dirac-admin-submit-pilot-for-job
# Author : Ricardo Graciani
########################################################################
__RCSID__ = "cd6b25c (2010-12-04 11:45:50 +0000) Ricardo Graciani <[email protected]>"
import sys
import DIRAC
from DIRAC.Core.Base import Script
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
jobdb = JobDB()
tqdb = TaskQueueDB()
result = jobdb.selectJobs( { 'Status' : [ 'Received', 'Checking', 'Waiting' ] } )
if not result[ 'OK' ]:
print result[ 'Message' ]
sys.exit( 1 )
jobList = result[ 'Value' ]
print tqdb.forceRecreationOfTables()
for job in jobList:
result = jobdb.getJobAttribute( job, 'RescheduleCounter' )
if not result[ 'OK' ]:
print "Cannot get reschedule counter for job %s" % job
rC = 0
rC = result[ 'Value' ]
if rC >= jobdb.maxRescheduling:
jobdb.setJobAttribute( job, "RescheduleCounter", "0" )
Example 15: Matcher
class Matcher( object ):
""" Logic for matching
"""
def __init__( self, pilotAgentsDB = None, jobDB = None, tqDB = None, jlDB = None, opsHelper = None ):
""" c'tor
"""
if pilotAgentsDB:
self.pilotAgentsDB = pilotAgentsDB
else:
self.pilotAgentsDB = PilotAgentsDB()
if jobDB:
self.jobDB = jobDB
else:
self.jobDB = JobDB()
if tqDB:
self.tqDB = tqDB
else:
self.tqDB = TaskQueueDB()
if jlDB:
self.jlDB = jlDB
else:
self.jlDB = JobLoggingDB()
if opsHelper:
self.opsHelper = opsHelper
else:
self.opsHelper = Operations()
self.log = gLogger.getSubLogger( "Matcher" )
self.limiter = Limiter( jobDB = self.jobDB, opsHelper = self.opsHelper )
def selectJob( self, resourceDescription, credDict ):
""" Main job selection function to find the highest priority job matching the resource capacity
"""
startTime = time.time()
resourceDict = self._getResourceDict( resourceDescription, credDict )
negativeCond = self.limiter.getNegativeCondForSite( resourceDict['Site'] )
result = self.tqDB.matchAndGetJob( resourceDict, negativeCond = negativeCond )
if not result['OK']:
return result
result = result['Value']
if not result['matchFound']:
self.log.info( "No match found" )
raise RuntimeError( "No match found" )
jobID = result['jobId']
resAtt = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup', 'Status'] )
if not resAtt['OK']:
raise RuntimeError( 'Could not retrieve job attributes' )
if not resAtt['Value']:
raise RuntimeError( "No attributes returned for job" )
if not resAtt['Value']['Status'] == 'Waiting':
self.log.error( 'Job matched by the TQ is not in Waiting state', str( jobID ) )
result = self.tqDB.deleteJob( jobID )
if not result[ 'OK' ]:
return result
raise RuntimeError( "Job %s is not in Waiting state" % str( jobID ) )
self._reportStatus( resourceDict, jobID )
result = self.jobDB.getJobJDL( jobID )
if not result['OK']:
raise RuntimeError( "Failed to get the job JDL" )
resultDict = {}
resultDict['JDL'] = result['Value']
resultDict['JobID'] = jobID
matchTime = time.time() - startTime
self.log.info( "Match time: [%s]" % str( matchTime ) )
gMonitor.addMark( "matchTime", matchTime )
# Get some extra stuff into the response returned
resOpt = self.jobDB.getJobOptParameters( jobID )
if resOpt['OK']:
for key, value in resOpt['Value'].items():
resultDict[key] = value
resAtt = self.jobDB.getJobAttributes( jobID, ['OwnerDN', 'OwnerGroup'] )
if not resAtt['OK']:
raise RuntimeError( 'Could not retrieve job attributes' )
if not resAtt['Value']:
raise RuntimeError( 'No attributes returned for job' )
if self.opsHelper.getValue( "JobScheduling/CheckMatchingDelay", True ):
self.limiter.updateDelayCounters( resourceDict['Site'], jobID )
pilotInfoReportedFlag = resourceDict.get( 'PilotInfoReportedFlag', False )
if not pilotInfoReportedFlag:
self._updatePilotInfo( resourceDict )
self._updatePilotJobMapping( resourceDict, jobID )
resultDict['DN'] = resAtt['Value']['OwnerDN']
resultDict['Group'] = resAtt['Value']['OwnerGroup']
#......... (the rest of the code is omitted here) .........
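Example 2 above exercises this logic through the WorkloadManagement/Matcher RPC service; selectJob() can also be called directly. The sketch below assumes the method ends by returning the resultDict assembled above and that credDict carries the caller's DN and group under these key names (both assumptions); the resource description reuses values from Example 2:
matcher = Matcher()
resourceDescription = { 'OwnerGroup' : 'prod',
                        'OwnerDN' : '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser',
                        'Site' : 'DIRAC.Jenkins.org',
                        'CPUTime' : 86400 }
credDict = { 'DN' : resourceDescription['OwnerDN'], 'group' : 'prod' }  # assumed key names
try:
  result = matcher.selectJob( resourceDescription, credDict )
  print result['JobID'], result['JDL']
except RuntimeError, e:
  # selectJob raises RuntimeError e.g. when no waiting job matches the resource
  print "No job matched: %s" % e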