本文整理汇总了Python中DIRAC.WorkloadManagementSystem.Client.WMSClient.WMSClient类的典型用法代码示例。如果您正苦于以下问题:Python WMSClient类的具体用法?Python WMSClient怎么用?Python WMSClient使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了WMSClient类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_matcher
def test_matcher(self):
    """Submit a hello-world job, register it in the TaskQueueDB and verify
    that the Matcher service hands it out for a matching resource description.
    """
    # insert a proper DN to run the test
    resourceDescription = {
        'OwnerGroup': 'prod',
        'OwnerDN': '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser/[email protected]',
        'DIRACVersion': 'pippo',
        'ReleaseVersion': 'blabla',
        'VirtualOrganization': 'LHCB',
        'PilotInfoReportedFlag': 'True',
        'PilotBenchmark': 'anotherPilot',
        'LHCbPlatform': 'CERTO',
        'Site': 'DIRAC.Jenkins.org',
        'CPUTime': 86400}
    matcherService = RPCClient('WorkloadManagement/Matcher')
    stateUpdate = RPCClient('WorkloadManagement/JobStateUpdate')
    wmsClient = WMSClient()

    # build and submit a simple job targeted at the Jenkins site
    job = helloWorldJob()
    job.setDestination('DIRAC.Jenkins.org')
    job.setInputData('/a/bbb')
    job.setType('User')
    jdlFile = createFile(job)
    res = wmsClient.submitJob(job._toJDL(xmlFile=jdlFile))
    self.assert_(res['OK'])
    jobID = res['Value']

    # move the job to Waiting and insert it into the task queue
    res = stateUpdate.setJobStatus(jobID, 'Waiting', 'matching', 'source')
    self.assert_(res['OK'])
    tqDB = TaskQueueDB()
    tqDefDict = {'OwnerDN': '/C=ch/O=DIRAC/OU=DIRAC CI/CN=ciuser/[email protected]',
                 'OwnerGroup': 'prod', 'Setup': 'JenkinsSetup', 'CPUTime': 86400}
    res = tqDB.insertJob(jobID, tqDefDict, 10)
    self.assert_(res['OK'])

    # the matcher should now return this job for the matching resource
    res = matcherService.requestJob(resourceDescription)
    print(res)
    self.assert_(res['OK'])
    wmsClient.deleteJob(jobID)
示例2: test_ParametricChain
def test_ParametricChain(self):
    """ This test will submit a parametric job which should generate 3 actual jobs
    """
    wmsClient = WMSClient()
    stateUpdate = RPCClient('WorkloadManagement/JobStateUpdate')
    monitor = JobMonitoringClient()

    # build and submit the parametric job description
    job = parametricJob()
    jdlFile = createFile(job)
    result = wmsClient.submitJob(job._toJDL(xmlFile=jdlFile))
    self.assertTrue(result['OK'])

    # a parametric submission returns one ID per generated job
    jobIDList = result['Value']
    self.assertEqual(len(jobIDList), 3)

    # each generated job carries the parameter-expanded name
    result = monitor.getJobsParameters(jobIDList, ['JobName'])
    self.assertTrue(result['OK'])
    jobNames = [result['Value'][jobID]['JobName'] for jobID in result['Value']]
    self.assertEqual(set(jobNames), set(['parametric_helloWorld_%s' % nJob for nJob in range(3)]))

    # mark every job Done, delete them all, and verify the Deleted status
    for jobID in jobIDList:
        result = stateUpdate.setJobStatus(jobID, 'Done', 'matching', 'source')
        self.assertTrue(result['OK'])
    result = wmsClient.deleteJob(jobIDList)
    self.assertTrue(result['OK'])
    for jobID in jobIDList:
        result = monitor.getJobStatus(jobID)
        self.assertTrue(result['OK'])
        self.assertEqual(result['Value'], 'Deleted')
示例3: __sendKillCommand
def __sendKillCommand(self, job):
    """Send a kill signal to the job such that it cannot continue running.

    :param int job: ID of job to send kill command
    """
    ownerDN = self.jobDB.getJobAttribute(job, 'OwnerDN')
    ownerGroup = self.jobDB.getJobAttribute(job, 'OwnerGroup')
    # bail out early if either attribute lookup failed
    if not (ownerDN['OK'] and ownerGroup['OK']):
        self.log.error("Failed to get ownerDN or Group for job:", "%s: %s, %s" %
                       (job, ownerDN.get('Message', ''), ownerGroup.get('Message', '')))
        return
    # act with the owner's delegated credentials so the kill is authorized
    wmsClient = WMSClient(useCertificates=True, delegatedDN=ownerDN['Value'], delegatedGroup=ownerGroup['Value'])
    resKill = wmsClient.killJob(job)
    if not resKill['OK']:
        self.log.error("Failed to send kill command to job", "%s: %s" % (job, resKill['Message']))
示例4: __init__
def __init__(self, *args, **kwargs):
    """ c'tor

    Creates the service clients used by the agent and declares the
    placeholders that initialize() later fills from the CS.
    """
    AgentModule.__init__(self, *args, **kwargs)
    # clients used for the cleaning work
    self.replicaManager = ReplicaManager()     # replica manager
    self.transClient = TransformationClient()  # transformation client
    self.wmsClient = WMSClient()               # wms client
    self.requestClient = RequestClient()       # request client
    self.metadataClient = FileCatalogClient()  # file catalog client
    # placeholders for CS options, populated in initialize()
    self.transformationTypes = None  # transformation types
    self.directoryLocations = None   # directory locations
    self.transfidmeta = None         # transformation metadata tag
    self.archiveAfter = None         # archive period in days
    self.activeStorages = None       # active SEs
    self.logSE = None                # transformation log SE
    self.enableFlag = None           # enable/disable execution
示例5: initialize
def initialize(self):
    """Sets defaults """
    # service clients
    self.replicaManager = ReplicaManager()
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.requestClient = RequestClient()
    self.metadataClient = FileCatalogClient()
    self.storageUsageClient = StorageUsageClient()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')
    defaultTypes = ['MCSimulation', 'DataReconstruction', 'DataStripping', 'MCStripping', 'Merge', 'Replication']
    self.transformationTypes = sortList(self.am_getOption('TransformationTypes', defaultTypes))
    gLogger.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
    defaultLocations = ['TransformationDB', 'StorageUsage', 'MetadataCatalog']
    self.directoryLocations = sortList(self.am_getOption('DirectoryLocations', defaultLocations))
    gLogger.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
    self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
    gLogger.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
    gLogger.info("Will archive Completed transformations after %d days" % self.archiveAfter)
    self.activeStorages = sortList(self.am_getOption('ActiveSEs', []))
    gLogger.info("Will check the following storage elements: %s" % str(self.activeStorages))
    self.logSE = self.am_getOption('TransformationLogSE', 'LogSE')
    gLogger.info("Will remove logs found on storage element: %s" % self.logSE)
    return S_OK()
示例6: initialize
def initialize(self):
    """ agent initialisation

    reading and setting config opts

    :param self: self reference
    """
    # # shifter proxy
    # See cleanCatalogContents method: this proxy will be used ALSO when the file catalog used
    # is the DIRAC File Catalog (DFC).
    # This is possible because of unset of the "UseServerCertificate" option
    self.shifterProxy = self.am_getOption('shifterProxy', None)
    # transformation types: an explicit agent option wins over the Operations values
    self.dataProcTTypes = Operations().getValue('Transformations/DataProcessing', self.dataProcTTypes)
    self.dataManipTTypes = Operations().getValue('Transformations/DataManipulation', self.dataManipTTypes)
    agentTSTypes = self.am_getOption('TransformationTypes', [])
    self.transformationTypes = sorted(agentTSTypes or self.dataProcTTypes + self.dataManipTTypes)
    self.log.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
    # directory locations to scan
    self.directoryLocations = sorted(self.am_getOption('DirectoryLocations', self.directoryLocations))
    self.log.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
    # metadata tag holding the transformation ID
    self.transfidmeta = self.am_getOption('TransfIDMeta', self.transfidmeta)
    self.log.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    # archive period
    self.archiveAfter = self.am_getOption('ArchiveAfter', self.archiveAfter)  # days
    self.log.info("Will archive Completed transformations after %d days" % self.archiveAfter)
    # active SEs
    self.activeStorages = sorted(self.am_getOption('ActiveSEs', self.activeStorages))
    if self.activeStorages:
        self.log.info("Will check the following storage elements: %s" % str(self.activeStorages))
    # transformation log SE
    self.logSE = Operations().getValue('/LogStorage/LogSE', self.logSE)
    self.log.info("Will remove logs found on storage element: %s" % self.logSE)
    # service clients
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.reqClient = ReqClient()
    self.metadataClient = FileCatalogClient()
    return S_OK()
示例7: __init__
def __init__(self, transClient=None, logger=None, submissionClient=None, jobMonitoringClient=None,
             outputDataModule=None, jobClass=None, opsH=None, destinationPlugin=None,
             ownerDN=None, ownerGroup=None):
    """ Generates some default objects.
        jobClass is by default "DIRAC.Interfaces.API.Job.Job". An extension of it also works:
        VOs can pass in their job class extension, if present
    """
    logger = logger or gLogger.getSubLogger('WorkflowTasks')
    super(WorkflowTasks, self).__init__(transClient, logger)
    # delegate submission credentials only when both DN and group are known
    useCertificates = bool(ownerDN) and bool(ownerGroup)
    self.submissionClient = submissionClient or WMSClient(useCertificates=useCertificates,
                                                          delegatedDN=ownerDN,
                                                          delegatedGroup=ownerGroup)
    self.jobMonitoringClient = jobMonitoringClient or JobMonitoringClient()
    self.jobClass = jobClass or Job
    self.opsH = opsH or Operations()
    # CS-driven defaults apply when the caller does not provide a value
    self.outputDataModule = outputDataModule or self.opsH.getValue("Transformations/OutputDataModule", "")
    self.destinationPlugin = destinationPlugin or self.opsH.getValue('Transformations/DestinationPlugin', 'BySE')
    # plugin/module objects are instantiated lazily elsewhere
    self.destinationPlugin_o = None
    self.outputDataModule_o = None
示例8: initialize
def initialize(self):
    """ agent initialisation

    reading and setting config opts

    :param self: self reference
    """
    # shifter proxy used for all operations
    self.am_setOption('shifterProxy', 'DataManager')
    # transformation types: an explicit agent option wins over the Operations defaults
    self.dataProcTTypes = Operations().getValue('Transformations/DataProcessing', ['MCSimulation', 'Merge'])
    self.dataManipTTypes = Operations().getValue('Transformations/DataManipulation', ['Replication', 'Removal'])
    agentTSTypes = self.am_getOption('TransformationTypes', [])
    self.transformationTypes = sorted(agentTSTypes or self.dataProcTTypes + self.dataManipTTypes)
    self.log.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
    # directory locations to scan
    self.directoryLocations = sorted(self.am_getOption('DirectoryLocations', ['TransformationDB',
                                                                              'MetadataCatalog']))
    self.log.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
    # metadata tag holding the transformation ID
    self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
    self.log.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    # archive period
    self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
    self.log.info("Will archive Completed transformations after %d days" % self.archiveAfter)
    # active SEs
    self.activeStorages = sorted(self.am_getOption('ActiveSEs', []))
    self.log.info("Will check the following storage elements: %s" % str(self.activeStorages))
    # transformation log SE
    self.logSE = Operations().getValue('/LogStorage/LogSE', 'LogSE')
    self.log.info("Will remove logs found on storage element: %s" % self.logSE)
    # enable/disable execution, should be using CS option Status?? with default value as 'Active'??
    self.enableFlag = self.am_getOption('EnableFlag', 'True')
    # data manager
    # self.dm = DataManager()
    # service clients
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.reqClient = ReqClient()
    self.metadataClient = FileCatalogClient()
    return S_OK()
示例9: __init__
def __init__(self, agentName, baseAgentName=False, properties=None):
    """ c'tor

    Creates the service clients used by the agent and declares the CS-option
    placeholders that are filled later during initialisation.

    :param self: self reference
    :param str agentName: name of agent
    :param bool baseAgentName: whatever
    :param dict properties: whatever else (a fresh empty dict when omitted)
    """
    # FIX: the original signature used a mutable default (properties=dict()),
    # which is evaluated once and shared by every call; if AgentModule mutates
    # it, state leaks between instances. Use the None-sentinel idiom instead.
    if properties is None:
        properties = {}
    AgentModule.__init__(self, agentName, baseAgentName, properties)
    ## replica manager
    self.replicaManager = ReplicaManager()
    ## transformation client
    self.transClient = TransformationClient()
    ## wms client
    self.wmsClient = WMSClient()
    ## request client
    self.requestClient = RequestClient()
    ## file catalog client
    self.metadataClient = FileCatalogClient()
    ## storage usage agent
    self.storageUsageClient = StorageUsageClient()
    ## placeholders for CS options
    ## transformations types
    self.transformationTypes = None
    ## directory locations
    self.directoryLocations = None
    ## transformation metadata
    self.transfidmeta = None
    ## archive period in days
    self.archiveAfter = None
    ## active SEs
    self.activeStorages = None
    ## transformation log SEs
    self.logSE = None
    ## enable/disable execution
    self.enableFlag = None
示例10: test_JobStateUpdateAndJobMonitoringMultuple
def test_JobStateUpdateAndJobMonitoringMultuple(self):
    """ # Now, let's submit some jobs. Different sites, types, inputs
    """
    wmsClient = WMSClient()
    jobMonitor = JobMonitoringClient()
    jobStateUpdate = RPCClient('WorkloadManagement/JobStateUpdate')

    # submit one job per (destination, input list, type) combination
    destinations = ['DIRAC.site1.org', 'DIRAC.site2.org']
    inputLists = [['/a/1.txt', '/a/2.txt'], ['/a/1.txt', '/a/3.txt', '/a/4.txt'], []]
    jobTypes = ['User', 'Test']
    jobIDs = []
    for dest in destinations:
        for lfns in inputLists:
            for jobType in jobTypes:
                job = helloWorldJob()
                job.setDestination(dest)
                job.setInputData(lfns)
                job.setType(jobType)
                jdlFile = createFile(job)
                res = wmsClient.submitJob(job._toJDL(xmlFile=jdlFile))
                self.assertTrue(res['OK'])
                jobID = res['Value']
                jobIDs.append(jobID)

    # cross-check the monitoring aggregations against what was submitted
    res = jobMonitor.getSites()
    self.assertTrue(res['OK'])
    self.assertTrue(set(res['Value']) <= set(destinations + ['ANY', 'DIRAC.Jenkins.org']))
    res = jobMonitor.getJobTypes()
    self.assertTrue(res['OK'])
    self.assertEqual(sorted(res['Value']), sorted(jobTypes))
    res = jobMonitor.getApplicationStates()
    self.assertTrue(res['OK'])
    self.assertEqual(sorted(res['Value']), sorted(['Unknown']))
    res = jobMonitor.getOwners()
    self.assertTrue(res['OK'])
    res = jobMonitor.getOwnerGroup()
    self.assertTrue(res['OK'])
    res = jobMonitor.getProductionIds()
    self.assertTrue(res['OK'])
    res = jobMonitor.getJobGroups()
    self.assertTrue(res['OK'])
    res = jobMonitor.getStates()
    self.assertTrue(res['OK'])
    self.assertTrue(sorted(res['Value']) in [['Received'], sorted(['Received', 'Waiting'])])
    res = jobMonitor.getMinorStates()
    self.assertTrue(res['OK'])
    self.assertTrue(sorted(res['Value']) in [['Job accepted'], sorted(['Job accepted', 'matching'])])
    self.assertTrue(res['OK'])
    res = jobMonitor.getJobs()
    self.assertTrue(res['OK'])
    self.assertTrue(set([str(x) for x in jobIDs]) <= set(res['Value']))
    # res = jobMonitor.getCounters(attrList)
    # self.assertTrue(res['OK'])
    res = jobMonitor.getCurrentJobCounters()
    self.assertTrue(res['OK'])
    try:
        # either counter may be absent; the sum then raises TypeError, which is tolerated
        self.assertTrue(res['Value'].get('Received') + res['Value'].get('Waiting') >=
                        long(len(destinations) * len(inputLists) * len(jobTypes)))
    except TypeError:
        pass
    res = jobMonitor.getJobsSummary(jobIDs)
    self.assertTrue(res['OK'])
    res = jobMonitor.getJobPageSummaryWeb({}, [], 0, 100)
    self.assertTrue(res['OK'])

    # status updates on the last submitted job (jobID left over from the loop)
    res = jobStateUpdate.setJobStatusBulk(jobID, {str(datetime.datetime.utcnow()): {
        'Status': 'Running',
        'MinorStatus': 'MinorStatus',
        'ApplicationStatus': 'ApplicationStatus',
        'Source': 'Unknown'}})
    self.assertTrue(res['OK'])
    res = jobStateUpdate.setJobsParameter({jobID: ['Status', 'Running']})
    self.assertTrue(res['OK'])

    # delete the jobs - this will just set its status to "deleted"
    wmsClient.deleteJob(jobIDs)
示例11: test_JobStateUpdateAndJobMonitoring
def test_JobStateUpdateAndJobMonitoring(self):
    """ Verifying all JobStateUpdate and JobMonitoring functions
    """
    wmsClient = WMSClient()
    jobMonitor = JobMonitoringClient()
    jobStateUpdate = RPCClient('WorkloadManagement/JobStateUpdate')

    # create a job and check stuff
    job = helloWorldJob()
    jdlFile = createFile(job)
    # submitting the job. Checking few stuff
    res = wmsClient.submitJob(job._toJDL(xmlFile=jdlFile))
    self.assertTrue(res['OK'])
    jobID = int(res['Value'])
    # jobID = res['JobID']
    res = jobMonitor.getJobJDL(jobID, True)
    self.assertTrue(res['OK'])
    res = jobMonitor.getJobJDL(jobID, False)
    self.assertTrue(res['OK'])

    # Adding stuff
    res = jobStateUpdate.setJobStatus(jobID, 'Matched', 'matching', 'source')
    self.assertTrue(res['OK'])
    res = jobStateUpdate.setJobParameters(jobID, [('par1', 'par1Value'), ('par2', 'par2Value')])
    self.assertTrue(res['OK'])
    res = jobStateUpdate.setJobApplicationStatus(jobID, 'app status', 'source')
    self.assertTrue(res['OK'])
    # res = jobStateUpdate.setJobFlag()
    # self.assertTrue(res['OK'])
    # res = jobStateUpdate.unsetJobFlag()
    # self.assertTrue(res['OK'])
    res = jobStateUpdate.setJobSite(jobID, 'Site')
    self.assertTrue(res['OK'])
    # res = jobMonitor.traceJobParameter( 'Site', 1, 'Status' )
    # self.assertTrue(res['OK'])

    # now checking few things
    res = jobMonitor.getJobStatus(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], 'Running')
    res = jobMonitor.getJobParameter(jobID, 'par1')
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], {'par1': 'par1Value'})
    res = jobMonitor.getJobParameters(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], {'par1': 'par1Value', 'par2': 'par2Value'})
    res = jobMonitor.getJobAttribute(jobID, 'Site')
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], 'Site')
    res = jobMonitor.getJobAttributes(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value']['ApplicationStatus'], 'app status')
    self.assertEqual(res['Value']['JobName'], 'helloWorld')
    res = jobMonitor.getJobSummary(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value']['ApplicationStatus'], 'app status')
    self.assertEqual(res['Value']['Status'], 'Running')
    res = jobMonitor.getJobHeartBeatData(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], [])
    res = jobMonitor.getInputData(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value'], [])
    res = jobMonitor.getJobPrimarySummary(jobID)
    self.assertTrue(res['OK'])
    res = jobMonitor.getAtticJobParameters(jobID)
    self.assertTrue(res['OK'])

    # the bulk status update should be reflected in the job summary
    res = jobStateUpdate.setJobsStatus([jobID], 'Done', 'MinorStatus', 'Unknown')
    self.assertTrue(res['OK'])
    res = jobMonitor.getJobSummary(jobID)
    self.assertTrue(res['OK'])
    self.assertEqual(res['Value']['Status'], 'Done')
    self.assertEqual(res['Value']['MinorStatus'], 'MinorStatus')
    self.assertEqual(res['Value']['ApplicationStatus'], 'app status')
    res = jobStateUpdate.sendHeartBeat(jobID, {'bih': 'bih'}, {'boh': 'boh'})
    self.assertTrue(res['OK'])

    # delete the job - this will just set its status to "deleted"
    wmsClient.deleteJob(jobID)
示例12: TransformationCleaningAgent
class TransformationCleaningAgent( AgentModule ):
'''
.. class:: TransformationCleaningAgent
:param ReplicaManager replicaManager: ReplicaManager instance
:param TransformationClient transClient: TransformationClient instance
:param RequestClient requestClient: RequestClient instance
:param FileCatalogClient metadataClient: FileCatalogClient instance
'''
def __init__(self, *args, **kwargs):
    ''' c'tor

    Instantiates the service clients and declares the CS-option
    placeholders that are filled later in initialize().
    '''
    AgentModule.__init__(self, *args, **kwargs)
    # clients used to clean transformations
    self.replicaManager = ReplicaManager()
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.requestClient = RequestClient()
    self.metadataClient = FileCatalogClient()
    # CS option placeholders (set in initialize())
    self.transformationTypes = None  # transformation types
    self.directoryLocations = None   # directory locations
    self.transfidmeta = None         # transformation metadata tag
    self.archiveAfter = None         # archive period in days
    self.activeStorages = None       # active SEs
    self.logSE = None                # transformation log SE
    self.enableFlag = None           # enable/disable execution
def initialize(self):
    ''' agent initialisation

    reading and setting config opts

    :param self: self reference
    '''
    # shifter proxy
    self.am_setOption('shifterProxy', 'DataManager')
    # transformation types: an explicit agent option overrides the Operations defaults
    agentTSTypes = self.am_getOption('TransformationTypes', [])
    if agentTSTypes:
        self.transformationTypes = sortList(agentTSTypes)
    else:
        processingTypes = Operations().getValue('Transformations/DataProcessing', ['MCSimulation', 'Merge'])
        manipulationTypes = Operations().getValue('Transformations/DataManipulation', ['Replication', 'Removal'])
        self.transformationTypes = sortList(processingTypes + manipulationTypes)
    self.log.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
    # directory locations to scan
    self.directoryLocations = sortList(self.am_getOption('DirectoryLocations', ['TransformationDB',
                                                                                'MetadataCatalog']))
    self.log.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
    # metadata tag holding the transformation ID
    self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
    self.log.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    # archive period
    self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
    self.log.info("Will archive Completed transformations after %d days" % self.archiveAfter)
    # active SEs
    self.activeStorages = sortList(self.am_getOption('ActiveSEs', []))
    self.log.info("Will check the following storage elements: %s" % str(self.activeStorages))
    # transformation log SE
    self.logSE = self.am_getOption('TransformationLogSE', 'LogSE')
    self.log.info("Will remove logs found on storage element: %s" % self.logSE)
    # enable/disable execution, should be using CS option Status?? with default value as 'Active'??
    self.enableFlag = self.am_getOption('EnableFlag', 'True')
    return S_OK()
#############################################################################
def execute( self ):
''' execution in one agent's cycle
:param self: self reference
'''
self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
if not self.enableFlag == 'True':
self.log.info( 'TransformationCleaningAgent is disabled by configuration option EnableFlag' )
return S_OK( 'Disabled via CS flag' )
# # Obtain the transformations in Cleaning status and remove any mention of the jobs/files
res = self.transClient.getTransformations( { 'Status' : 'Cleaning',
'Type' : self.transformationTypes } )
if res['OK']:
for transDict in res['Value']:
# # if transformation is of type `Replication` or `Removal`, there is nothing to clean.
# # We just archive
#.........这里部分代码省略.........
示例13: WorkflowTasks
class WorkflowTasks(TaskBase):
""" Handles jobs
"""
def __init__(self, transClient=None, logger=None, submissionClient=None, jobMonitoringClient=None,
             outputDataModule=None, jobClass=None, opsH=None, destinationPlugin=None,
             ownerDN=None, ownerGroup=None):
    """ Generates some default objects.
        jobClass is by default "DIRAC.Interfaces.API.Job.Job". An extension of it also works:
        VOs can pass in their job class extension, if present
    """
    super(WorkflowTasks, self).__init__(transClient, logger or gLogger.getSubLogger('WorkflowTasks'))
    # submission is delegated only when both DN and group were supplied
    useCertificates = bool(ownerDN) and bool(ownerGroup)
    if submissionClient:
        self.submissionClient = submissionClient
    else:
        self.submissionClient = WMSClient(useCertificates=useCertificates,
                                          delegatedDN=ownerDN,
                                          delegatedGroup=ownerGroup)
    self.jobMonitoringClient = jobMonitoringClient or JobMonitoringClient()
    self.jobClass = jobClass or Job
    self.opsH = opsH or Operations()
    # fall back to CS values when the caller did not provide these
    self.outputDataModule = outputDataModule or self.opsH.getValue("Transformations/OutputDataModule", "")
    self.destinationPlugin = destinationPlugin or self.opsH.getValue('Transformations/DestinationPlugin', 'BySE')
    # instantiated lazily elsewhere
    self.destinationPlugin_o = None
    self.outputDataModule_o = None
def prepareTransformationTasks(self, transBody, taskDict, owner='', ownerGroup='',
                               ownerDN='', bulkSubmissionFlag=False):
    """ Prepare tasks, given a taskDict, that is created (with some manipulation) by the DB
        jobClass is by default "DIRAC.Interfaces.API.Job.Job". An extension of it also works.

    :param transBody: transformation job template
    :param taskDict: dictionary of per task parameters
    :param owner: owner of the transformation
    :param ownerGroup: group of the owner of the transformation
    :param ownerDN: DN of the owner of the transformation
    :return: S_OK/S_ERROR with updated taskDict
    """
    # fall back to the identity of the current proxy when owner/group are missing
    if (not owner) or (not ownerGroup):
        res = getProxyInfo(False, False)
        if not res['OK']:
            return res
        owner = res['Value']['username']
        ownerGroup = res['Value']['group']
    # resolve the DN from the username when not given explicitly
    if not ownerDN:
        res = getDNForUsername(owner)
        if not res['OK']:
            return res
        ownerDN = res['Value'][0]
    # dispatch to the bulk or per-task preparation path
    if bulkSubmissionFlag:
        return self.__prepareTasksBulk(transBody, taskDict, owner, ownerGroup, ownerDN)
    return self.__prepareTasks(transBody, taskDict, owner, ownerGroup, ownerDN)
def __prepareTasksBulk(self, transBody, taskDict, owner, ownerGroup, ownerDN):
""" Prepare transformation tasks with a single job object for bulk submission
"""
if taskDict:
transID = taskDict.values()[0]['TransformationID']
else:
return S_OK({})
# Prepare the bulk Job object with common parameters
oJob = self.jobClass(transBody)
method = 'prepareTransformationTasksBulk'
self._logVerbose('Setting job owner:group to %s:%s' % (owner, ownerGroup),
transID=transID, method=method)
oJob.setOwner(owner)
#.........这里部分代码省略.........
示例14: TransformationCleaningAgent
class TransformationCleaningAgent( AgentModule ):
#############################################################################
def initialize(self):
    """Sets defaults """
    # service clients
    self.replicaManager = ReplicaManager()
    self.transClient = TransformationClient()
    self.wmsClient = WMSClient()
    self.requestClient = RequestClient()
    self.metadataClient = FileCatalogClient()
    self.storageUsageClient = StorageUsageClient()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption('shifterProxy', 'DataManager')
    typeDefaults = ['MCSimulation', 'DataReconstruction', 'DataStripping', 'MCStripping', 'Merge', 'Replication']
    self.transformationTypes = sortList(self.am_getOption('TransformationTypes', typeDefaults))
    gLogger.info("Will consider the following transformation types: %s" % str(self.transformationTypes))
    locationDefaults = ['TransformationDB', 'StorageUsage', 'MetadataCatalog']
    self.directoryLocations = sortList(self.am_getOption('DirectoryLocations', locationDefaults))
    gLogger.info("Will search for directories in the following locations: %s" % str(self.directoryLocations))
    self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
    gLogger.info("Will use %s as metadata tag name for TransformationID" % self.transfidmeta)
    self.archiveAfter = self.am_getOption('ArchiveAfter', 7)  # days
    gLogger.info("Will archive Completed transformations after %d days" % self.archiveAfter)
    self.activeStorages = sortList(self.am_getOption('ActiveSEs', []))
    gLogger.info("Will check the following storage elements: %s" % str(self.activeStorages))
    self.logSE = self.am_getOption('TransformationLogSE', 'LogSE')
    gLogger.info("Will remove logs found on storage element: %s" % self.logSE)
    return S_OK()
#############################################################################
def execute(self):
    """ The TransformationCleaningAgent execution method.
    """
    self.enableFlag = self.am_getOption('EnableFlag', 'True')
    if not self.enableFlag == 'True':
        self.log.info('TransformationCleaningAgent is disabled by configuration option %s/EnableFlag' % (self.section))
        return S_OK('Disabled via CS flag')
    # Cleaning status: remove any mention of the jobs/files
    res = self.transClient.getTransformations({'Status': 'Cleaning', 'Type': self.transformationTypes})
    if res['OK']:
        for transDict in res['Value']:
            self.cleanTransformation(transDict['TransformationID'])
    # RemovingFiles status: (wait for it) remove the output files
    res = self.transClient.getTransformations({'Status': 'RemovingFiles', 'Type': self.transformationTypes})
    if res['OK']:
        for transDict in res['Value']:
            self.removeTransformationOutput(transDict['TransformationID'])
    # Completed status: archive those inactive for more than archiveAfter days
    olderThanTime = datetime.utcnow() - timedelta(days=self.archiveAfter)
    res = self.transClient.getTransformations({'Status': 'Completed', 'Type': self.transformationTypes},
                                              older=olderThanTime)
    if res['OK']:
        for transDict in res['Value']:
            self.archiveTransformation(transDict['TransformationID'])
    return S_OK()
#############################################################################
#
# Get the transformation directories for checking
#
def getTransformationDirectories(self, transID):
    """ Get the directories for the supplied transformation from the transformation system """
    directories = []
    # transformation system's own record of output directories
    if 'TransformationDB' in self.directoryLocations:
        res = self.transClient.getTransformationParameters(transID, ['OutputDirectories'])
        if not res['OK']:
            gLogger.error("Failed to obtain transformation directories", res['Message'])
            return res
        directories = self.__addDirs(transID, res['Value'].splitlines(), directories)
    # directories known to the storage usage service
    if 'StorageUsage' in self.directoryLocations:
        res = self.storageUsageClient.getStorageDirectories('', '', transID, [])
        if not res['OK']:
            gLogger.error("Failed to obtain storage usage directories", res['Message'])
            return res
        directories = self.__addDirs(transID, res['Value'], directories)
    # directories registered in the metadata catalog under the transformation ID tag
    if 'MetadataCatalog' in self.directoryLocations:
        res = self.metadataClient.findDirectoriesByMetadata({self.transfidmeta: transID})
        if not res['OK']:
            gLogger.error("Failed to obtain metadata catalog directories", res['Message'])
            return res
        directories = self.__addDirs(transID, res['Value'], directories)
    if not directories:
        gLogger.info("No output directories found")
    return S_OK(sortList(directories))
def __addDirs( self, transID, newDirs, existingDirs ):
for dir in newDirs:
transStr = str( transID ).zfill( 8 )
#.........这里部分代码省略.........
示例15: test_FullChain
def test_FullChain(self):
    """ This test will
        - call all the WMSClient methods
          that will end up calling all the JobManager service methods
        - use the JobMonitoring to verify few properties
        - call the JobCleaningAgent to eliminate job entries from the DBs
    """
    wmsClient = WMSClient()
    jobMonitor = JobMonitoringClient()
    jobStateUpdate = RPCClient('WorkloadManagement/JobStateUpdate')

    # create and submit the job
    job = helloWorldJob()
    jdlFile = createFile(job)
    res = wmsClient.submitJob(job._toJDL(xmlFile=jdlFile))
    self.assert_(res['OK'])
    # self.assertEqual( type( res['Value'] ), int )
    # self.assertEqual( res['Value'], res['JobID'] )
    # jobID = res['JobID']
    jobID = res['Value']

    # set Running, then reset and reschedule: the job becomes Received again
    jobStateUpdate.setJobStatus(jobID, 'Running', 'Executing Minchiapp', 'source')
    res = wmsClient.resetJob(jobID)
    self.assert_(res['OK'])
    res = wmsClient.rescheduleJob(jobID)
    self.assert_(res['OK'])
    res = jobMonitor.getJobStatus(jobID)
    self.assert_(res['OK'])
    self.assertEqual(res['Value'], 'Received')

    # a Matched job can be killed
    jobStateUpdate.setJobStatus(jobID, 'Matched', 'matching', 'source')
    res = wmsClient.killJob(jobID)
    self.assert_(res['OK'])
    res = jobMonitor.getJobStatus(jobID)
    self.assert_(res['OK'])
    self.assertEqual(res['Value'], 'Killed')

    # killing a Done job leaves it Done
    jobStateUpdate.setJobStatus(jobID, 'Done', 'matching', 'source')
    res = wmsClient.killJob(jobID)
    self.assert_(res['OK'])
    res = jobMonitor.getJobStatus(jobID)
    self.assert_(res['OK'])
    self.assertEqual(res['Value'], 'Done')  # this time it won't kill... it's done!

    # delete the job - this will just set its status to "deleted"
    res = wmsClient.deleteJob(jobID)
    self.assert_(res['OK'])
    res = jobMonitor.getJobStatus(jobID)
    self.assert_(res['OK'])
    self.assertEqual(res['Value'], 'Deleted')