本文整理汇总了Python中DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient类的典型用法代码示例。如果您正苦于以下问题:Python DataIntegrityClient类的具体用法?Python DataIntegrityClient怎么用?Python DataIntegrityClient使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DataIntegrityClient类的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: initialize
def initialize(self):
    """Create the helper clients and configure the agent defaults."""
    # Clients this agent talks to during execution.
    self.fileCatalog = FileCatalog()
    # self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    self.storageDB = StorageManagementDB()
    # Run with the proxy of the shifter defined under
    # /Operations/Shifter/DataManager; the 'shifterProxy' option in the
    # Configuration can be used to override this default.
    self.am_setOption('shifterProxy', 'DataManager')
    return S_OK()
示例2: initialize
def initialize(self):
    """Instantiate clients and read the pin-lifetime option."""
    # Clients this agent talks to during execution.
    self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    # self.storageDB = StorageManagementDB()
    # Pin lifetime defaults to THROTTLING_TIME (one day).
    self.pinLifetime = self.am_getOption('PinLifetime', THROTTLING_TIME)
    # Run with the proxy of the shifter defined under
    # /Operations/Shifter/DataManager; the 'shifterProxy' option in the
    # Configuration can be used to override this default.
    self.am_setOption('shifterProxy', 'DataManager')
    return S_OK()
示例3: __init__
def __init__( self, *args, **kwargs ):
  """ c'tor

  Instantiates the helper clients and reads the agent's configuration
  options (transformation types, directory locations, active SEs and
  the TransformationID metadata tag).
  """
  AgentModule.__init__( self, *args, **kwargs )
  self.integrityClient = DataIntegrityClient()
  self.replicaManager = ReplicaManager()
  self.transClient = TransformationClient()
  self.fileCatalogClient = FileCatalogClient()
  agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
  if agentTSTypes:
    self.transformationTypes = agentTSTypes
  else:
    self.transformationTypes = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
  # sorted() is the stdlib equivalent of the legacy DIRAC sortList()
  # helper; it also matches the style of the other agent constructors.
  self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', ['TransformationDB', 'MetadataCatalog'] ) )
  self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
  self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
示例4: __init__
def __init__(self, *args, **kwargs):
    """ c'tor
    """
    AgentModule.__init__(self, *args, **kwargs)
    # Helper clients used throughout the agent's lifetime.
    self.consistencyInspector = ConsistencyInspector()
    self.integrityClient = DataIntegrityClient()
    self.fc = FileCatalog()
    self.transClient = TransformationClient()
    self.fileCatalogClient = FileCatalogClient()
    # Transformation types come from the agent options; fall back to the
    # Operations default only when the option is empty (or-short-circuit
    # keeps the same evaluation order as an explicit if/else).
    self.transformationTypes = (self.am_getOption('TransformationTypes', [])
                                or Operations().getValue('Transformations/DataProcessing',
                                                         ['MCSimulation', 'Merge']))
    defaultLocations = ['TransformationDB', 'MetadataCatalog']
    self.directoryLocations = sorted(self.am_getOption('DirectoryLocations', defaultLocations))
    self.transfidmeta = self.am_getOption('TransfIDMeta', "TransformationID")
    self.enableFlag = True
示例5: initialize
def initialize( self ):
  """Sets defaults

  Instantiates the helper clients, configures the shifter proxy and
  reads the agent's configuration options, logging each of them.
  """
  self.integrityClient = DataIntegrityClient()
  self.replicaManager = ReplicaManager()
  self.transClient = TransformationClient()
  self.fileCatalogClient = FileCatalogClient()
  # This sets the Default Proxy to used as that defined under
  # /Operations/Shifter/DataManager
  # the shifterProxy option in the Configuration can be used to change this default.
  self.am_setOption( 'shifterProxy', 'DataManager' )
  # sorted() is the stdlib equivalent of the legacy DIRAC sortList() helper.
  self.transformationTypes = sorted( self.am_getOption( 'TransformationTypes', ['MCSimulation', 'DataReconstruction', 'DataStripping', 'MCStripping', 'Merge'] ) )
  gLogger.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) )
  self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', ['TransformationDB', 'MetadataCatalog'] ) )
  gLogger.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
  self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
  gLogger.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
  self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
  gLogger.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
  return S_OK()
示例6: initialize
def initialize(self):
    """Create clients, pick the monitoring backend and register all
    gMonitor activities for the configured storage elements."""
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    self.DataIntegrityClient = DataIntegrityClient()
    # Talk to the DB directly or through the client service, depending
    # on the 'DirectDB' option.
    if self.am_getOption('DirectDB', False):
        from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
        self.MigrationMonitoringDB = MigrationMonitoringDB()
    else:
        from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
        self.MigrationMonitoringDB = MigrationMonitoringClient()
    # Run with the proxy of the shifter defined under
    # /Operations/Shifter/DataManager; the 'shifterProxy' option in the
    # Configuration can be used to override this default.
    self.am_setOption('shifterProxy', 'DataManager')
    self.userName = 'acsmith'
    self.storageElements = self.am_getOption('StorageElements', ['CERN-RAW'])
    self.lastMonitors = {}
    gMonitor.registerActivity("Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM)
    if self.storageElements:
        gLogger.info("Agent will be initialised to monitor the following SEs:")
        # Per-SE activities as (name template, description, unit, operation);
        # registered in this exact order for every monitored SE.
        activitySpecs = (
            ("Iteration%s", "Agent Loops/min", "Loops", gMonitor.OP_SUM),
            ("MigratingFiles%s", "Files waiting for migration", "Files", gMonitor.OP_MEAN),
            ("MigratedFiles%s", "Newly migrated files", "Files", gMonitor.OP_SUM),
            ("TotalMigratedFiles%s", "Total migrated files", "Files", gMonitor.OP_ACUM),
            ("TotalMigratedSize%s", "Total migrated file size", "GB", gMonitor.OP_ACUM),
            ("ChecksumMatches%s", "Successfully migrated files", "Files", gMonitor.OP_SUM),
            ("TotalChecksumMatches%s", "Total successfully migrated files", "Files", gMonitor.OP_ACUM),
            ("ChecksumMismatches%s", "Erroneously migrated files", "Files", gMonitor.OP_SUM),
            ("TotalChecksumMismatches%s", "Total erroneously migrated files", "Files", gMonitor.OP_ACUM),
            ("MigrationTime%s", "Average migration time", "Seconds", gMonitor.OP_MEAN),
        )
        for se in self.storageElements:
            gLogger.info(se)
            self.lastMonitors[se] = datetime.datetime.utcfromtimestamp(0.0)
            for template, description, unit, operation in activitySpecs:
                gMonitor.registerActivity(template % se, description, "MigrationMonitoringAgent", unit, operation)
    return S_OK()
示例7: __init__
def __init__(self, *args, **kwargs):
    """ c'tor

    Instantiates the helper clients and reads the agent's configuration
    options (transformation types, directory locations, active SEs and
    the TransformationID metadata tag).
    """
    AgentModule.__init__(self, *args, **kwargs)
    self.integrityClient = DataIntegrityClient()
    self.replicaManager = ReplicaManager()
    self.transClient = TransformationClient()
    self.fileCatalogClient = FileCatalogClient()
    agentTSTypes = self.am_getOption("TransformationTypes", [])
    if agentTSTypes:
        self.transformationTypes = agentTSTypes
    else:
        self.transformationTypes = Operations().getValue(
            "Transformations/DataProcessing", ["MCSimulation", "Merge"]
        )
    # sorted() is the stdlib equivalent of the legacy DIRAC sortList() helper.
    self.directoryLocations = sorted(
        self.am_getOption("DirectoryLocations", ["TransformationDB", "MetadataCatalog"])
    )
    self.activeStorages = sorted(self.am_getOption("ActiveSEs", []))
    self.transfidmeta = self.am_getOption("TransfIDMeta", "TransformationID")
    self.enableFlag = True
示例8: StageRequestAgent
class StageRequestAgent( AgentModule ):
  """Agent that drives the staging state machine: it checks the current
  SE cache usage and submits pre-stage (tape recall) requests for the
  replicas recorded in the StorageManagementDB.
  """

  def initialize( self ):
    """Create the helper clients and set the agent defaults."""
    self.replicaManager = ReplicaManager()
    self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    #self.storageDB = StorageManagementDB()
    # pin lifetime = 1 day
    self.pinLifetime = self.am_getOption( 'PinLifetime', THROTTLING_TIME )
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    return S_OK()

  def execute( self ):
    """Execution entry point: refresh cache usage, then submit stage requests."""
    # Get the current submitted stage space and the amount of pinned space for each storage element
    res = self.getStorageUsage()
    if not res['OK']:
      return res
    return self.submitStageRequests()

  def getStorageUsage( self ):
    """ Fill the current Status of the SE Caches from the DB
    """
    self.storageElementCache = {}
    res = self.stagerClient.getSubmittedStagePins()
    if not res['OK']:
      gLogger.fatal( "StageRequest.getStorageUsage: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
      return res
    # Cache the per-SE usage for later decisions; also log a human-readable summary.
    self.storageElementUsage = res['Value']
    if self.storageElementUsage:
      gLogger.info( "StageRequest.getStorageUsage: Active stage/pin requests found at the following sites:" )
      for storageElement in sortList( self.storageElementUsage.keys() ):
        seDict = self.storageElementUsage[storageElement]
        # Convert to GB for printout
        seDict['TotalSize'] = seDict['TotalSize'] / ( 1000 * 1000 * 1000.0 )
        gLogger.info( "StageRequest.getStorageUsage: %s: %s replicas with a size of %.3f GB." %
                      ( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] ) )
    if not self.storageElementUsage:
      gLogger.info( "StageRequest.getStorageUsage: No active stage/pin requests found." )
    return S_OK()

  def submitStageRequests( self ):
    """ This manages the following transitions of the Replicas
        * Waiting -> Offline (if the file is not found Cached)
        * Waiting -> StageSubmitted (if the file is found Cached)
        * Offline -> StageSubmitted (if there are not more Waiting replicas)
    """
    # Retry Replicas that have not been Staged in a previous attempt
    res = self._getMissingReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']
    if seReplicas:
      gLogger.info( "StageRequest.submitStageRequests: Completing partially Staged Tasks" )
      for storageElement, seReplicaIDs in seReplicas.items():
        gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
        self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
    # Check Waiting Replicas and select those found Online and all other Replicas from the same Tasks
    res = self._getOnlineReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['AllReplicaInfo']
    # Check Offline Replicas that fit in the Cache and all other Replicas from the same Tasks
    res = self._getOfflineReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    # Merge info from both results
    for storageElement, seReplicaIDs in res['Value']['SEReplicas'].items():
      if storageElement not in seReplicas:
        seReplicas[storageElement] = seReplicaIDs
      else:
        for replicaID in seReplicaIDs:
          if replicaID not in seReplicas[storageElement]:
            seReplicas[storageElement].append( replicaID )
    allReplicaInfo.update( res['Value']['AllReplicaInfo'] )
    gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas for staging." % len( allReplicaInfo ) )
    for storageElement, seReplicaIDs in seReplicas.items():
      gLogger.debug( 'Staging at %s:' % storageElement, seReplicaIDs )
      self._issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
    return S_OK()

  #......... the rest of this class is omitted in this excerpt .........
示例9: RequestPreparationAgent
class RequestPreparationAgent( AgentModule ):
  """Agent that manages the New -> Waiting transition of replicas in the
  staging system, validating each new replica against the FileCatalog
  (existence, size and replica location) before it can be staged.
  """

  def initialize( self ):
    """Create the helper clients and set the agent defaults."""
    self.fileCatalog = FileCatalog()
    self.dm = DataManager()
    self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    return S_OK()

  def execute( self ):
    """ This is the first logical task to be executed and manages the New->Waiting transition of the Replicas
    """
    res = self.__getNewReplicas()
    if not res['OK']:
      gLogger.fatal( "RequestPreparation.prepareNewReplicas: Failed to get replicas from StagerDB.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.info( "There were no New replicas found" )
      return res
    replicas = res['Value']['Replicas']
    replicaIDs = res['Value']['ReplicaIDs']
    gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained %s New replicas for preparation." % len( replicaIDs ) )
    # Check if the files exist in the FileCatalog
    res = self.__getExistingFiles( replicas )
    if not res['OK']:
      return res
    exist = res['Value']['Exist']
    terminal = res['Value']['Missing']
    failed = res['Value']['Failed']
    if not exist:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed to determine the existence of any file' )
      return S_OK()
    # Replicas of missing LFNs are terminally failed; drop them from further processing.
    terminalReplicaIDs = {}
    for lfn, reason in terminal.items():
      for replicaID in replicas[lfn].values():
        terminalReplicaIDs[replicaID] = reason
      replicas.pop( lfn )
    gLogger.info( "RequestPreparation.prepareNewReplicas: %s files exist in the FileCatalog." % len( exist ) )
    if terminal:
      gLogger.info( "RequestPreparation.prepareNewReplicas: %s files do not exist in the FileCatalog." % len( terminal ) )
    # Obtain the file sizes from the FileCatalog
    res = self.__getFileSize( exist )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    terminal = res['Value']['ZeroSize']
    fileSizes = res['Value']['FileSizes']
    if not fileSizes:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed determine sizes of any files' )
      return S_OK()
    # Zero-size files are terminally failed as well.
    for lfn, reason in terminal.items():
      for _se, replicaID in replicas[lfn].items():
        terminalReplicaIDs[replicaID] = reason
      replicas.pop( lfn )
    gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained %s file sizes from the FileCatalog." % len( fileSizes ) )
    if terminal:
      gLogger.info( "RequestPreparation.prepareNewReplicas: %s files registered with zero size in the FileCatalog." % len( terminal ) )
    # Obtain the replicas from the FileCatalog
    res = self.__getFileReplicas( fileSizes.keys() )
    if not res['OK']:
      return res
    failed.update( res['Value']['Failed'] )
    terminal = res['Value']['ZeroReplicas']
    fileReplicas = res['Value']['Replicas']
    if not fileReplicas:
      gLogger.error( 'RequestPreparation.prepareNewReplicas: Failed determine replicas for any files' )
      return S_OK()
    # Files with no registered replicas are terminally failed.
    for lfn, reason in terminal.items():
      for _se, replicaID in replicas[lfn].items():
        terminalReplicaIDs[replicaID] = reason
      replicas.pop( lfn )
    gLogger.info( "RequestPreparation.prepareNewReplicas: Obtained replica information for %s file from the FileCatalog." % len( fileReplicas ) )
    if terminal:
      gLogger.info( "RequestPreparation.prepareNewReplicas: %s files registered with zero replicas in the FileCatalog." % len( terminal ) )
    # Check the replicas exist at the requested site
    replicaMetadata = []
    for lfn, requestedSEs in replicas.items():
      lfnReplicas = fileReplicas.get( lfn )
      # This should not happen in principle, but it was seen
      # after a corrupted staging request has entered the DB
      if not lfnReplicas:
        gLogger.error( "Missing replicas information", "%s %s" % ( lfn, requestedSEs ) )
        continue
      for requestedSE, replicaID in requestedSEs.items():
        if not requestedSE in lfnReplicas.keys():
          terminalReplicaIDs[replicaID] = "LFN not registered at requested SE"
          replicas[lfn].pop( requestedSE )
        else:
          replicaMetadata.append( ( replicaID, lfnReplicas[requestedSE], fileSizes[lfn] ) )
    #......... the rest of this method is omitted in this excerpt .........
示例10: StageMonitorAgent
class StageMonitorAgent( AgentModule ):
  """Agent that monitors submitted stage requests and manages the
  StageSubmitted -> Staged transition of replicas in the
  StorageManagementDB.
  """

  def initialize( self ):
    """Create the helper clients and set the agent defaults."""
    self.replicaManager = ReplicaManager()
    #self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    self.storageDB = StorageManagementDB()
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    return S_OK()

  def execute( self ):
    """Execution entry point: delegate to monitorStageRequests."""
    res = self.monitorStageRequests()
    return res

  def monitorStageRequests( self ):
    """ This is the third logical task manages the StageSubmitted->Staged transition of the Replicas
    """
    res = self.__getStageSubmittedReplicas()
    if not res['OK']:
      gLogger.fatal( "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.info( "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found" )
      return res
    seReplicas = res['Value']['SEReplicas']
    replicaIDs = res['Value']['ReplicaIDs']
    gLogger.info( "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring." % len( replicaIDs ) )
    for storageElement, seReplicaIDs in seReplicas.items():
      self.__monitorStorageElementStageRequests( storageElement, seReplicaIDs, replicaIDs )
    return S_OK()

  def __monitorStorageElementStageRequests( self, storageElement, seReplicaIDs, replicaIDs ):
    """Check the prestage status of the given replicas at one SE and
    update the DB accordingly (terminal failures and staged replicas)."""
    terminalReplicaIDs = {}
    stagedReplicas = []
    # Build PFN -> replicaID and PFN -> requestID maps for this SE.
    pfnRepIDs = {}
    pfnReqIDs = {}
    for replicaID in seReplicaIDs:
      pfn = replicaIDs[replicaID]['PFN']
      pfnRepIDs[pfn] = replicaID
      requestID = replicaIDs[replicaID].get( 'RequestID', None )
      if requestID:
        pfnReqIDs[pfn] = replicaIDs[replicaID]['RequestID']
    gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
    res = self.replicaManager.getStorageFileMetadata( pfnReqIDs.keys(), storageElement )
    if not res['OK']:
      gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.", res['Message'] )
      return
    prestageStatus = res['Value']
    # Missing PFNs are terminal failures; everything found Cached is staged.
    for pfn, reason in prestageStatus['Failed'].items():
      if re.search( 'File does not exist', reason ):
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: PFN did not exist in the StorageElement", pfn )
        terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN did not exist in the StorageElement'
    for pfn, staged in prestageStatus['Successful'].items():
      if staged and 'Cached' in staged and staged['Cached']:
        stagedReplicas.append( pfnRepIDs[pfn] )
    # Update the states of the replicas in the database
    if terminalReplicaIDs:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
      res = self.storageDB.updateReplicaFailure( terminalReplicaIDs )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
    if stagedReplicas:
      gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
      res = self.storageDB.setStageComplete( stagedReplicas )
      if not res['OK']:
        gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
      res = self.storageDB.updateReplicaStatus( stagedReplicas, 'Staged' )
      if not res['OK']:
        gLogger.error( "StageRequest.__monitorStorageElementStageRequests: Failed to insert replica status.", res['Message'] )
    return

  def __getStageSubmittedReplicas( self ):
    """ This obtains the StageSubmitted replicas from the Replicas table and the RequestID from the StageRequests table """
    res = self.storageDB.getCacheReplicas( {'Status':'StageSubmitted'} )
    if not res['OK']:
      gLogger.error( "StageRequest.__getStageSubmittedReplicas: Failed to get replicas with StageSubmitted status.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.debug( "StageRequest.__getStageSubmittedReplicas: No StageSubmitted replicas found to process." )
      return S_OK()
    else:
      gLogger.debug( "StageRequest.__getStageSubmittedReplicas: Obtained %s StageSubmitted replicas(s) to process." % len( res['Value'] ) )
    # Group the replica IDs by storage element.
    seReplicas = {}
    replicaIDs = res['Value']
    for replicaID, info in replicaIDs.items():
      storageElement = info['SE']
      # NOTE(review): dict.has_key() is Python 2 only; 'in' would be the portable spelling.
      if not seReplicas.has_key( storageElement ):
        seReplicas[storageElement] = []
      seReplicas[storageElement].append( replicaID )
    # RequestID was missing from replicaIDs dictionary BUGGY?
    res = self.storageDB.getStageRequests( {'ReplicaID':replicaIDs.keys()} )
    #......... the rest of this method is omitted in this excerpt .........
示例11: ValidateOutputDataAgent
class ValidateOutputDataAgent( AgentModule ):
  """Agent that performs integrity validation of the output data of
  transformations in the 'ValidatingOutput' state and finalises those
  waiting on integrity checks.
  """

  def __init__( self, *args, **kwargs ):
    """ c'tor
    """
    AgentModule.__init__( self, *args, **kwargs )
    self.integrityClient = DataIntegrityClient()
    self.fc = FileCatalog()
    self.transClient = TransformationClient()
    self.fileCatalogClient = FileCatalogClient()
    # Transformation types from the agent options, with a fallback to the
    # Operations section default.
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = agentTSTypes
    else:
      self.transformationTypes = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
    self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', ['TransformationDB',
                                                                                'MetadataCatalog'] ) )
    self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
    self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
    self.enableFlag = True

  #############################################################################
  def initialize( self ):
    """ Sets defaults
    """
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    gLogger.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) )
    gLogger.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
    gLogger.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
    gLogger.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
    return S_OK()

  #############################################################################
  def execute( self ):
    """ The VerifyOutputData execution method
    """
    # NOTE(review): EnableFlag is handled as the *string* 'True', not a boolean —
    # presumably to match how the CS option is stored; verify before changing.
    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    if not self.enableFlag == 'True':
      self.log.info( "VerifyOutputData is disabled by configuration option 'EnableFlag'" )
      return S_OK( 'Disabled via CS flag' )
    gLogger.info( "-" * 40 )
    self.updateWaitingIntegrity()
    gLogger.info( "-" * 40 )
    # Run a full integrity check on each transformation in ValidatingOutput state.
    res = self.transClient.getTransformations( {'Status':'ValidatingOutput', 'Type':self.transformationTypes} )
    if not res['OK']:
      gLogger.error( "Failed to get ValidatingOutput transformations", res['Message'] )
      return res
    transDicts = res['Value']
    if not transDicts:
      gLogger.info( "No transformations found in ValidatingOutput status" )
      return S_OK()
    gLogger.info( "Found %s transformations in ValidatingOutput status" % len( transDicts ) )
    for transDict in transDicts:
      transID = transDict['TransformationID']
      res = self.checkTransformationIntegrity( int( transID ) )
      if not res['OK']:
        gLogger.error( "Failed to perform full integrity check for transformation %d" % transID )
      else:
        self.finalizeCheck( transID )
        gLogger.info( "-" * 40 )
    return S_OK()

  def updateWaitingIntegrity( self ):
    """ Get 'WaitingIntegrity' transformations, update to 'ValidatedOutput'
    """
    gLogger.info( "Looking for transformations in the WaitingIntegrity status to update" )
    res = self.transClient.getTransformations( {'Status':'WaitingIntegrity'} )
    if not res['OK']:
      gLogger.error( "Failed to get WaitingIntegrity transformations", res['Message'] )
      return res
    transDicts = res['Value']
    if not transDicts:
      gLogger.info( "No transformations found in WaitingIntegrity status" )
      return S_OK()
    gLogger.info( "Found %s transformations in WaitingIntegrity status" % len( transDicts ) )
    for transDict in transDicts:
      transID = transDict['TransformationID']
      gLogger.info( "-" * 40 )
      res = self.integrityClient.getTransformationProblematics( int( transID ) )
      if not res['OK']:
        gLogger.error( "Failed to determine waiting problematics for transformation", res['Message'] )
      elif not res['Value']:
        # No remaining problematic files: promote the transformation.
        res = self.transClient.setTransformationParameter( transID, 'Status', 'ValidatedOutput' )
        if not res['OK']:
          gLogger.error( "Failed to update status of transformation %s to ValidatedOutput" % ( transID ) )
        else:
          gLogger.info( "Updated status of transformation %s to ValidatedOutput" % ( transID ) )
      else:
        gLogger.info( "%d problematic files for transformation %s were found" % ( len( res['Value'] ), transID ) )
  #......... the rest of this class is omitted in this excerpt .........
示例12: StageRequestAgent
class StageRequestAgent( AgentModule ):
  """Agent that manages the Waiting -> StageSubmitted transition of
  replicas, submitting pre-stage requests subject to the per-SE cache
  space limits.
  """

  def initialize( self ):
    """Create the helper clients and set the agent defaults."""
    self.replicaManager = ReplicaManager()
    #self.stagerClient = StorageManagerClient()
    self.dataIntegrityClient = DataIntegrityClient()
    self.storageDB = StorageManagementDB()
    # pin lifetime = 1 day
    self.pinLifetime = self.am_getOption( 'PinLifetime', 60 * 60 * 24 )
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/DataManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    return S_OK()

  def execute( self ):
    """Execution entry point: refresh per-SE cache usage, then submit stage requests."""
    # Get the current submitted stage space and the amount of pinned space for each storage element
    res = self.storageDB.getSubmittedStagePins()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to obtain submitted requests from StorageManagementDB.", res['Message'] )
      return res
    self.storageElementUsage = res['Value']
    if self.storageElementUsage:
      gLogger.info( "StageRequest.execute: Active stage/pin requests found at the following sites:" )
      for storageElement in sortList( self.storageElementUsage.keys() ):
        seDict = self.storageElementUsage[storageElement]
        # Daniela: fishy? Changed it to GB and division by 1024 instead of 1000
        gLogger.info( "StageRequest.execute: %s: %s replicas with a size of %.3f GB." % ( storageElement.ljust( 15 ), str( seDict['Replicas'] ).rjust( 6 ), seDict['TotalSize'] / ( 1024 * 1024 * 1024.0 ) ) )
    if not self.storageElementUsage:
      gLogger.info( "StageRequest.execute: No active stage/pin requests found." )
    res = self.submitStageRequests()
    return res

  def submitStageRequests( self ):
    """ This manages the Waiting->StageSubmitted transition of the Replicas
    """
    res = self.__getWaitingReplicas()
    if not res['OK']:
      gLogger.fatal( "StageRequest.submitStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
      return res
    if not res['Value']:
      gLogger.info( "StageRequest.submitStageRequests: There were no Waiting replicas found" )
      return res
    seReplicas = res['Value']['SEReplicas']
    allReplicaInfo = res['Value']['ReplicaIDs']
    gLogger.info( "StageRequest.submitStageRequests: Obtained %s replicas Waiting for staging." % len( allReplicaInfo ) )
    for storageElement, seReplicaIDs in seReplicas.items():
      self.__issuePrestageRequests( storageElement, seReplicaIDs, allReplicaInfo )
    return S_OK()

  def __issuePrestageRequests( self, storageElement, seReplicaIDs, allReplicaInfo ):
    """Select the replicas that fit in the SE cache, check their integrity
    and submit the prestage requests."""
    # First select which files can be eligible for prestaging based on the available space
    usedSpace = 0
    # NOTE(review): dict.has_key() is Python 2 only; 'in' would be the portable spelling.
    if self.storageElementUsage.has_key( storageElement ):
      usedSpace = self.storageElementUsage[storageElement]['TotalSize']
    totalSpace = gConfig.getValue( "/Resources/StorageElements/%s/CacheSize" % storageElement, 0 )
    if not totalSpace:
      gLogger.info( "StageRequest__issuePrestageRequests: No space restriction at %s" % ( storageElement ) )
      selectedReplicaIDs = seReplicaIDs
    elif ( totalSpace > usedSpace ):
      gLogger.debug( "StageRequest__issuePrestageRequests: total space = %s, used space = %s" % ( totalSpace, usedSpace ) )
      gLogger.info( "StageRequest__issuePrestageRequests: %.4f GB available at %s" % ( ( totalSpace - usedSpace ) / ( 1024 * 1024 * 1024.0 ), storageElement ) )
      selectedReplicaIDs = []
      #logic was bad here, before the first comparison test, the single selected file for staging could be larger than the available space
      for replicaID in seReplicaIDs:
        if ( totalSpace - usedSpace ) > allReplicaInfo[replicaID]['Size']:
          usedSpace += allReplicaInfo[replicaID]['Size']
          selectedReplicaIDs.append( replicaID )
    else:
      # Cache already full: nothing can be staged at this SE.
      gLogger.info( "StageRequest__issuePrestageRequests: %.2f GB used at %s (limit %2.f GB)" % ( ( usedSpace ) / ( 1024 * 1024 * 1024.0 ), storageElement, totalSpace / ( 1024 * 1024 * 1024.0 ) ) )
      return
    gLogger.info( "StageRequest__issuePrestageRequests: Selected %s files eligible for staging at %s." % ( len( selectedReplicaIDs ), storageElement ) )
    # Now check that the integrity of the eligible files
    pfnRepIDs = {}
    for replicaID in selectedReplicaIDs:
      pfn = allReplicaInfo[replicaID]['PFN']
      pfnRepIDs[pfn] = replicaID
    res = self.__checkIntegrity( storageElement, pfnRepIDs, allReplicaInfo )
    if not res['OK']:
      return res
    pfnRepIDs = res['Value']
    # Now issue the prestage requests for the remaining replicas
    stageRequestMetadata = {}
    updatedPfnIDs = []
    if pfnRepIDs:
      gLogger.info( "StageRequest.__issuePrestageRequests: Submitting %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
      res = self.replicaManager.prestageStorageFile( pfnRepIDs.keys(), storageElement, lifetime = self.pinLifetime )
      gLogger.debug( "StageRequest.__issuePrestageRequests: replicaManager.prestageStorageFile: res=", res )
      #res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
      #res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}
      if not res['OK']:
        gLogger.error( "StageRequest.__issuePrestageRequests: Completely failed to sumbmit stage requests for replicas.", res['Message'] )
      else:
        # Group the successfully submitted replica IDs by the returned request ID.
        for pfn, requestID in res['Value']['Successful'].items():
          if not stageRequestMetadata.has_key( requestID ):
            stageRequestMetadata[requestID] = []
          stageRequestMetadata[requestID].append( pfnRepIDs[pfn] )
  #......... the rest of this method is omitted in this excerpt .........
示例13: StageMonitorAgent
class StageMonitorAgent( AgentModule ):
def initialize( self ):
self.replicaManager = ReplicaManager()
self.stagerClient = StorageManagerClient()
self.dataIntegrityClient = DataIntegrityClient()
#self.storageDB = StorageManagementDB()
# This sets the Default Proxy to used as that defined under
# /Operations/Shifter/DataManager
# the shifterProxy option in the Configuration can be used to change this default.
self.am_setOption( 'shifterProxy', 'DataManager' )
return S_OK()
def execute( self ):
res = getProxyInfo( disableVOMS = True )
if not res['OK']:
return res
self.proxyInfoDict = res['Value']
res = self.monitorStageRequests()
return res
def monitorStageRequests( self ):
""" This is the third logical task manages the StageSubmitted->Staged transition of the Replicas
"""
res = self.__getStageSubmittedReplicas()
if not res['OK']:
gLogger.fatal( "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
if not res['Value']:
gLogger.info( "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found" )
return res
seReplicas = res['Value']['SEReplicas']
replicaIDs = res['Value']['ReplicaIDs']
gLogger.info( "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring." % len( replicaIDs ) )
for storageElement, seReplicaIDs in seReplicas.items():
self.__monitorStorageElementStageRequests( storageElement, seReplicaIDs, replicaIDs )
gDataStoreClient.commit()
return S_OK()
def __monitorStorageElementStageRequests( self, storageElement, seReplicaIDs, replicaIDs ):
terminalReplicaIDs = {}
oldRequests = []
stagedReplicas = []
pfnRepIDs = {}
pfnReqIDs = {}
for replicaID in seReplicaIDs:
pfn = replicaIDs[replicaID]['PFN']
pfnRepIDs[pfn] = replicaID
requestID = replicaIDs[replicaID].get( 'RequestID', None )
if requestID:
pfnReqIDs[pfn] = replicaIDs[replicaID]['RequestID']
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
oAccounting = DataOperation()
oAccounting.setStartTime()
res = self.replicaManager.getStorageFileMetadata( pfnReqIDs.keys(), storageElement )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.", res['Message'] )
return
prestageStatus = res['Value']
accountingDict = self.__newAccountingDict( storageElement )
for pfn, reason in prestageStatus['Failed'].items():
accountingDict['TransferTotal'] += 1
if re.search( 'File does not exist', reason ):
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: PFN did not exist in the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN did not exist in the StorageElement'
for pfn, staged in prestageStatus['Successful'].items():
if staged and 'Cached' in staged and staged['Cached']:
accountingDict['TransferTotal'] += 1
accountingDict['TransferOK'] += 1
accountingDict['TransferSize'] += staged['Size']
stagedReplicas.append( pfnRepIDs[pfn] )
if staged and 'Cached' in staged and not staged['Cached']:
oldRequests.append( pfnRepIDs[pfn] ); #only ReplicaIDs
oAccounting.setValuesFromDict( accountingDict )
oAccounting.setEndTime()
gDataStoreClient.addRegister( oAccounting )
# Update the states of the replicas in the database
if terminalReplicaIDs:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
if stagedReplicas:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
res = self.stagerClient.setStageComplete( stagedReplicas )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
res = self.stagerClient.updateReplicaStatus( stagedReplicas, 'Staged' )
#.........这里部分代码省略.........
示例14: MigrationMonitoringAgent
class MigrationMonitoringAgent( AgentModule ):
def initialize( self ):
    """Set up the clients, the migration monitoring DB backend and the
    gMonitor activities, and pin the shifter proxy.

    The DB backend is selected by the 'DirectDB' option (direct DB access
    versus the client service); the storage elements to watch come from the
    'StorageElements' option.

    :return: S_OK always
    """
    self.ReplicaManager = ReplicaManager()
    self.DataLog = DataLoggingClient()
    self.DataIntegrityClient = DataIntegrityClient()
    if self.am_getOption( 'DirectDB', False ):
        from DIRAC.StorageManagementSystem.DB.MigrationMonitoringDB import MigrationMonitoringDB
        self.MigrationMonitoringDB = MigrationMonitoringDB()
    else:
        from DIRAC.StorageManagementSystem.Client.MigrationMonitoringClient import MigrationMonitoringClient
        self.MigrationMonitoringDB = MigrationMonitoringClient()
    # Run under the default proxy defined at /Operations/Shifter/DataManager;
    # the 'shifterProxy' option in the Configuration can override this.
    self.am_setOption( 'shifterProxy', 'DataManager' )
    # Generalized: the user name was previously hard-coded; it is now a
    # configuration option whose default preserves the old behaviour.
    self.userName = self.am_getOption( 'UserName', 'acsmith' )
    self.storageElements = self.am_getOption( 'StorageElements', ['CERN-RAW'] )
    # Per-SE timestamp of the last monitoring pass (epoch = never monitored).
    self.lastMonitors = {}
    gMonitor.registerActivity( "Iteration", "Agent Loops/min", "MigrationMonitoringAgent", "Loops", gMonitor.OP_SUM )
    if self.storageElements:
        gLogger.info( "Agent will be initialised to monitor the following SEs:" )
        # Per-SE activities: ( name template, description, unit, operation ),
        # registered in the same order as before.
        perSEActivities = [
            ( "Iteration%s", "Agent Loops/min", "Loops", gMonitor.OP_SUM ),
            ( "MigratingFiles%s", "Files waiting for migration", "Files", gMonitor.OP_MEAN ),
            ( "MigratedFiles%s", "Newly migrated files", "Files", gMonitor.OP_SUM ),
            ( "TotalMigratedFiles%s", "Total migrated files", "Files", gMonitor.OP_ACUM ),
            ( "TotalMigratedSize%s", "Total migrated file size", "GB", gMonitor.OP_ACUM ),
            ( "ChecksumMatches%s", "Successfully migrated files", "Files", gMonitor.OP_SUM ),
            ( "TotalChecksumMatches%s", "Total successfully migrated files", "Files", gMonitor.OP_ACUM ),
            ( "ChecksumMismatches%s", "Erroneously migrated files", "Files", gMonitor.OP_SUM ),
            ( "TotalChecksumMismatches%s", "Total erroneously migrated files", "Files", gMonitor.OP_ACUM ),
            ( "MigrationTime%s", "Average migration time", "Seconds", gMonitor.OP_MEAN ),
        ]
        for se in self.storageElements:
            gLogger.info( se )
            self.lastMonitors[se] = datetime.datetime.utcfromtimestamp( 0.0 )
            for nameTemplate, description, unit, operation in perSEActivities:
                gMonitor.registerActivity( nameTemplate % se, description, "MigrationMonitoringAgent", unit, operation )
    return S_OK()
def execute( self ):
    """One agent cycle: honour the EnableFlag kill switch, promote 'New'
    files, then check migration progress for every configured SE.

    :return: S_OK (with a message when disabled via the CS flag)
    """
    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    if self.enableFlag != 'True':
        self.log.info( 'MigrationMonitoringAgent is disabled by configuration option %s/EnableFlag' % ( self.section ) )
        return S_OK( 'Disabled via CS flag' )
    gMonitor.addMark( "Iteration", 1 )
    self.NewToMigrating()
    for storageElement in self.storageElements:
        gMonitor.addMark( "Iteration%s" % storageElement, 1 )
        self.MigratingToMigrated( storageElement )
    return S_OK()
#########################################################################################################
#
# Includes the file size and checksum information for replicas which do not have it
#
def NewToMigrating( self ):
    """Obtain the 'New' files from the migration monitoring db and (where
    necessary) add the size and checksum information.

    :return: S_OK on success, or the S_ERROR of whichever sub-step failed
    """
    # First get the new files from the database
    gLogger.info( "NewToMigrating: Attempting to obtain 'New' files." )
    res = self.__getFiles( '', 'New' )
    if not res['OK']:
        gLogger.error( "NewToMigrating: Failed to get 'New' files.", res['Message'] )
        return res
    newFiles = res['Value']['Files']
    if not newFiles:
        gLogger.info( "NewToMigrating: Found no 'New' files." )
        return S_OK()
    # Get the metadata from the catalog for files which do not have size or checksum
    res = self.__getCatalogFileMetadata( newFiles )
    if not res['OK']:
        gLogger.error( "NewToMigrating: Failed to get metadata for files", res['Message'] )
        return res
    metadata = res['Value']
    # Add the metadata to the migration monitoring DB.
    # Fix: propagate the helper's result instead of discarding it and
    # unconditionally returning S_OK.
    return self.__updateNewMigrating( metadata )
def __updateNewMigrating( self, fileMetadata ):
    """Placeholder: record the metadata of newly found files in the
    migration monitoring DB (the actual update is not yet implemented)."""
    fileCount = len( fileMetadata )
    gLogger.info( "__updateNewMigrating: Updating metadata for %s files" % fileCount )
    # The DB update itself is still to be written; the log line marks the spot.
    gLogger.info( "PUT THE CODE HERE TO UPDATE THE METDATA" )
    # self.__setMigratingReplicaStatus(fileMetadata.keys(),'Migrating')
    return S_OK()
#########################################################################################################
#
# Monitors the migration of files
#
def MigratingToMigrated( self, se ):
""" Obtain the active files from the migration monitoring db and check their status
"""
# First get the migrating files from the database
gLogger.info( "[%s] MigratingToMigrated: Attempting to obtain 'Migrating' files." % se )
res = self.__getFiles( se, 'Migrating' )
if not res['OK']:
gLogger.error( "[%s] MigratingToMigrated: Failed to get 'Migrating' files." % se, res['Message'] )
#.........这里部分代码省略.........