This article collects typical usage examples of the Python method DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient.setFileProblematic. If you are wondering what DataIntegrityClient.setFileProblematic does and how to use it in practice, the curated code examples below should help. You can also look further into the usage of its containing class, DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient.
Six code examples of DataIntegrityClient.setFileProblematic are shown below, sorted by popularity by default.
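Before the agent excerpts, here is a minimal, self-contained sketch of the call itself. It is not taken from the collected examples: the LFN, the reason string and the sourceComponent value are placeholders, while the result handling follows the S_OK / 'Successful' / 'Failed' convention that all of the agents below rely on.
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient

integrityClient = DataIntegrityClient()
# Hypothetical LFNs and reason; in the agents these come from catalogue or storage checks.
lfns = [ '/lhcb/user/s/someuser/example-file.raw' ]
res = integrityClient.setFileProblematic( lfns, 'LFN-LFC-DoesntExist', sourceComponent = 'MyAgent' )
if not res['OK']:
  gLogger.error( "Failed to report problematic files.", res['Message'] )
else:
  gLogger.info( "Reported %s files, failed for %s files." % ( len( res['Value']['Successful'] ),
                                                              len( res['Value']['Failed'] ) ) )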
Example 1: StageRequestAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
for storageElement, seReplicaIDs in offlineReplicas.items():
if not storageElement in seReplicas:
seReplicas[storageElement] = []
for replicaID in sorted( seReplicaIDs ):
if replicaID in replicasToStage:
seReplicaIDs.remove( replicaID )
seReplicas[storageElement].extend( seReplicaIDs )
replicasToStage.extend( seReplicaIDs )
for replicaID in allReplicaInfo.keys():
if replicaID not in replicasToStage:
del allReplicaInfo[replicaID]
totalSize = 0
for storageElement in sorted( seReplicas.keys() ):
replicaIDs = seReplicas[storageElement]
size = 0
for replicaID in replicaIDs:
size += self.__add( storageElement, allReplicaInfo[replicaID]['Size'] )
gLogger.info( 'StageRequest.__addAssociatedReplicas: Considering %s GB to be staged at %s' % ( size, storageElement ) )
totalSize += size
gLogger.info( "StageRequest.__addAssociatedReplicas: Obtained %s GB for staging." % totalSize )
return S_OK( {'SEReplicas':seReplicas, 'AllReplicaInfo':allReplicaInfo} )
def __checkIntegrity( self, storageElement, seReplicaIDs, allReplicaInfo ):
""" Check the integrity of the files to ensure they are available
Updates status of Offline Replicas for a later pass
Returns the list of Online replicas to be Staged
"""
if not seReplicaIDs:
return S_OK( {'Online': [], 'Offline': []} )
pfnRepIDs = {}
for replicaID in seReplicaIDs:
pfn = allReplicaInfo[replicaID]['PFN']
pfnRepIDs[pfn] = replicaID
gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( pfnRepIDs ), storageElement ) )
res = self.replicaManager.getStorageFileMetadata( pfnRepIDs.keys(), storageElement )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
return res
terminalReplicaIDs = {}
onlineReplicaIDs = []
offlineReplicaIDs = []
for pfn, metadata in res['Value']['Successful'].items():
if metadata['Size'] != allReplicaInfo[pfnRepIDs[pfn]]['Size']:
gLogger.error( "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN StorageElement size does not match FileCatalog'
pfnRepIDs.pop( pfn )
elif metadata['Lost']:
gLogger.error( "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
pfnRepIDs.pop( pfn )
elif metadata['Unavailable']:
gLogger.error( "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN is declared Unavailable by the StorageElement'
pfnRepIDs.pop( pfn )
else:
if metadata['Cached']:
gLogger.verbose( "StageRequest.__checkIntegrity: Cache hit for file." )
onlineReplicaIDs.append( pfnRepIDs[pfn] )
else:
offlineReplicaIDs.append( pfnRepIDs[pfn] )
for pfn, reason in res['Value']['Failed'].items():
if re.search( 'File does not exist', reason ):
gLogger.error( "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN does not exist in the StorageElement'
pfnRepIDs.pop( pfn )
# Update the states of the replicas in the database  # TODO: Send status to integrity DB
if terminalReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
if onlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Online." % len( onlineReplicaIDs ) )
if offlineReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas found Offline." % len( offlineReplicaIDs ) )
res = self.stagerClient.updateReplicaStatus( offlineReplicaIDs, 'Offline' )
return S_OK( {'Online': onlineReplicaIDs, 'Offline': offlineReplicaIDs} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()  # NOTE: this early return disables the reporting below; the setFileProblematic call is never reached
res = self.dataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'StageRequestAgent' )
if not res['OK']:
gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
return res
if res['Value']['Successful']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
if res['Value']['Failed']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
return res
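The __checkIntegrity method above carries the "TODO: Send status to integrity DB" comment but only records terminal failures in the stager database. A hedged sketch of completing that step with setFileProblematic is shown below; the helper name reportTerminalReplicas is illustrative, and it assumes the allReplicaInfo records carry an 'LFN' key, as the Waiting-replica records in example 4 do.
from DIRAC import gLogger

def reportTerminalReplicas( integrityClient, terminalReplicaIDs, allReplicaInfo, component = 'StageRequestAgent' ):
  """ Hedged sketch: flag terminally failed replicas in the integrity DB, grouped by failure reason. """
  lfnsByReason = {}
  for replicaID, reason in terminalReplicaIDs.items():
    lfnsByReason.setdefault( reason, [] ).append( allReplicaInfo[replicaID]['LFN'] )
  for reason, lfns in lfnsByReason.items():
    res = integrityClient.setFileProblematic( lfns, reason, sourceComponent = component )
    if not res['OK']:
      gLogger.error( "Failed to report problematic files.", res['Message'] )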
Example 2: RequestPreparationAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
gLogger.error( "RequestPreparation.prepareNewReplicas: Failed to update replica failures.", res['Message'] )
if replicaMetadata:
gLogger.info( "RequestPreparation.prepareNewReplicas: %s replica metadata to be updated." % len( replicaMetadata ) )
# Sets the Status='Waiting' of CacheReplicas records that are OK with catalogue checks
res = self.stagerClient.updateReplicaInformation( replicaMetadata )
if not res['OK']:
gLogger.error( "RequestPreparation.prepareNewReplicas: Failed to update replica metadata.", res['Message'] )
return S_OK()
def __getNewReplicas( self ):
""" This obtains the New replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the New replicas from the CacheReplicas table
res = self.stagerClient.getCacheReplicas( {'Status':'New'} )
if not res['OK']:
gLogger.error( "RequestPreparation.__getNewReplicas: Failed to get replicas with New status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "RequestPreparation.__getNewReplicas: No New replicas found to process." )
return S_OK()
else:
gLogger.debug( "RequestPreparation.__getNewReplicas: Obtained %s New replicas(s) to process." % len( res['Value'] ) )
replicas = {}
replicaIDs = {}
for replicaID, info in res['Value'].items():
lfn = info['LFN']
storageElement = info['SE']
replicas.setdefault( lfn, {} )[storageElement] = replicaID
replicaIDs[replicaID] = ( lfn, storageElement )
return S_OK( {'Replicas':replicas, 'ReplicaIDs':replicaIDs} )
def __getExistingFiles( self, lfns ):
""" This checks that the files exist in the FileCatalog. """
res = self.fileCatalog.exists( list( set( lfns ) ) )
if not res['OK']:
gLogger.error( "RequestPreparation.__getExistingFiles: Failed to determine whether files exist.", res['Message'] )
return res
failed = res['Value']['Failed']
success = res['Value']['Successful']
exist = [lfn for lfn, exists in success.items() if exists]
missing = list( set( success ) - set( exist ) )
if missing:
reason = 'LFN not registered in the FC'
gLogger.warn( "RequestPreparation.__getExistingFiles: %s" % reason, '\n'.join( [''] + missing ) )
self.__reportProblematicFiles( missing, 'LFN-LFC-DoesntExist' )
missing = dict.fromkeys( missing, reason )
else:
missing = {}
return S_OK( {'Exist':exist, 'Missing':missing, 'Failed':failed} )
def __getFileSize( self, lfns ):
""" This obtains the file size from the FileCatalog. """
fileSizes = {}
zeroSize = {}
res = self.fileCatalog.getFileSize( lfns )
if not res['OK']:
gLogger.error( "RequestPreparation.__getFileSize: Failed to get sizes for files.", res['Message'] )
return res
failed = res['Value']['Failed']
for lfn, size in res['Value']['Successful'].items():
if size == 0:
zeroSize[lfn] = "LFN registered with zero size in the FileCatalog"
else:
fileSizes[lfn] = size
if zeroSize:
for lfn, reason in zeroSize.items():
gLogger.warn( "RequestPreparation.__getFileSize: %s" % reason, lfn )
self.__reportProblematicFiles( zeroSize.keys(), 'LFN-LFC-ZeroSize' )
return S_OK( {'FileSizes':fileSizes, 'ZeroSize':zeroSize, 'Failed':failed} )
def __getFileReplicas( self, lfns ):
""" This obtains the replicas from the FileCatalog. """
replicas = {}
noReplicas = {}
res = self.dm.getActiveReplicas( lfns )
if not res['OK']:
gLogger.error( "RequestPreparation.__getFileReplicas: Failed to obtain file replicas.", res['Message'] )
return res
failed = res['Value']['Failed']
for lfn, lfnReplicas in res['Value']['Successful'].items():
if len( lfnReplicas.keys() ) == 0:
noReplicas[lfn] = "LFN registered with zero replicas in the FileCatalog"
else:
replicas[lfn] = lfnReplicas
if noReplicas:
for lfn, reason in noReplicas.items():
gLogger.warn( "RequestPreparation.__getFileReplicas: %s" % reason, lfn )
self.__reportProblematicFiles( noReplicas.keys(), 'LFN-LFC-NoReplicas' )
return S_OK( {'Replicas':replicas, 'ZeroReplicas':noReplicas, 'Failed':failed} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()  # NOTE: this early return disables the reporting below; the setFileProblematic call is never reached
res = self.dataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'RequestPreparationAgent' )
if not res['OK']:
gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
return res
if res['Value']['Successful']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
if res['Value']['Failed']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
return res
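RequestPreparationAgent uses a small set of reason codes for catalogue problems: 'LFN-LFC-DoesntExist' for unregistered LFNs, 'LFN-LFC-ZeroSize' for zero-size entries and 'LFN-LFC-NoReplicas' for entries without replicas. A hedged sketch of reporting all three in one pass follows; the LFN lists are placeholders standing in for the outputs of the __getExistingFiles, __getFileSize and __getFileReplicas checks above.
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient

integrityClient = DataIntegrityClient()
catalogueProblems = {
  'LFN-LFC-DoesntExist' : [],   # LFNs not registered in the FileCatalog
  'LFN-LFC-ZeroSize'    : [],   # LFNs registered with zero size
  'LFN-LFC-NoReplicas'  : [],   # LFNs registered without replicas
}
for reason, lfns in catalogueProblems.items():
  if lfns:
    res = integrityClient.setFileProblematic( lfns, reason, sourceComponent = 'RequestPreparationAgent' )
    if not res['OK']:
      gLogger.error( "Failed to report %s files." % reason, res['Message'] )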
Example 3: StageMonitorAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
res = self.__getStageSubmittedReplicas()
if not res['OK']:
gLogger.fatal( "StageMonitor.monitorStageRequests: Failed to get replicas from StorageManagementDB.", res['Message'] )
return res
if not res['Value']:
gLogger.info( "StageMonitor.monitorStageRequests: There were no StageSubmitted replicas found" )
return res
seReplicas = res['Value']['SEReplicas']
replicaIDs = res['Value']['ReplicaIDs']
gLogger.info( "StageMonitor.monitorStageRequests: Obtained %s StageSubmitted replicas for monitoring." % len( replicaIDs ) )
for storageElement, seReplicaIDs in seReplicas.items():
self.__monitorStorageElementStageRequests( storageElement, seReplicaIDs, replicaIDs )
return S_OK()
def __monitorStorageElementStageRequests( self, storageElement, seReplicaIDs, replicaIDs ):
terminalReplicaIDs = {}
stagedReplicas = []
pfnRepIDs = {}
pfnReqIDs = {}
for replicaID in seReplicaIDs:
pfn = replicaIDs[replicaID]['PFN']
pfnRepIDs[pfn] = replicaID
requestID = replicaIDs[replicaID].get( 'RequestID', None )
if requestID:
pfnReqIDs[pfn] = replicaIDs[replicaID]['RequestID']
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: Monitoring %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
res = self.replicaManager.getStorageFileMetadata( pfnReqIDs.keys(), storageElement )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Completely failed to monitor stage requests for replicas.", res['Message'] )
return
prestageStatus = res['Value']
for pfn, reason in prestageStatus['Failed'].items():
if re.search( 'File does not exist', reason ):
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: PFN did not exist in the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN did not exist in the StorageElement'
for pfn, staged in prestageStatus['Successful'].items():
if staged and 'Cached' in staged and staged['Cached']:
stagedReplicas.append( pfnRepIDs[pfn] )
# Update the states of the replicas in the database
if terminalReplicaIDs:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.storageDB.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
if stagedReplicas:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
res = self.storageDB.setStageComplete( stagedReplicas )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
res = self.storageDB.updateReplicaStatus( stagedReplicas, 'Staged' )
if not res['OK']:
gLogger.error( "StageRequest.__monitorStorageElementStageRequests: Failed to insert replica status.", res['Message'] )
return
def __getStageSubmittedReplicas( self ):
""" This obtains the StageSubmitted replicas from the Replicas table and the RequestID from the StageRequests table """
res = self.storageDB.getCacheReplicas( {'Status':'StageSubmitted'} )
if not res['OK']:
gLogger.error( "StageRequest.__getStageSubmittedReplicas: Failed to get replicas with StageSubmitted status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getStageSubmittedReplicas: No StageSubmitted replicas found to process." )
return S_OK()
else:
gLogger.debug( "StageRequest.__getStageSubmittedReplicas: Obtained %s StageSubmitted replicas(s) to process." % len( res['Value'] ) )
seReplicas = {}
replicaIDs = res['Value']
for replicaID, info in replicaIDs.items():
storageElement = info['SE']
if not seReplicas.has_key( storageElement ):
seReplicas[storageElement] = []
seReplicas[storageElement].append( replicaID )
# RequestID was missing from replicaIDs dictionary BUGGY?
res = self.storageDB.getStageRequests( {'ReplicaID':replicaIDs.keys()} )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( 'Could not obtain request IDs for replicas %s from StageRequests table' % ( replicaIDs.keys() ) )
for replicaID, info in res['Value'].items():
reqID = info['RequestID']
replicaIDs[replicaID]['RequestID'] = reqID
return S_OK( {'SEReplicas':seReplicas, 'ReplicaIDs':replicaIDs} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()  # NOTE: this early return disables the reporting below; the setFileProblematic call is never reached
res = self.dataIntegrityClient.setFileProblematic( lfns, reason, self.name )
if not res['OK']:
gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
return res
if res['Value']['Successful']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
if res['Value']['Failed']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
return res
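Unlike the other agents, this copy passes the reporting component positionally (self.name) rather than via the sourceComponent keyword. Assuming the usual DIRAC signature setFileProblematic( lfn, reason, sourceComponent = '' ), the two spellings are equivalent; a short sketch with placeholder LFN and reason values:
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient

integrityClient = DataIntegrityClient()
lfns = [ '/lhcb/user/s/someuser/lost-file.raw' ]   # hypothetical LFN
# Positional form, as used by this agent (self.name resolves to the agent name):
integrityClient.setFileProblematic( lfns, 'PFNMissing', 'StageMonitorAgent' )
# Keyword form, as used in the other examples:
integrityClient.setFileProblematic( lfns, 'PFNMissing', sourceComponent = 'StageMonitorAgent' )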
Example 4: StageRequestAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
# Now issue the prestage requests for the remaining replicas
stageRequestMetadata = {}
updatedPfnIDs = []
if pfnRepIDs:
gLogger.info( "StageRequest.__issuePrestageRequests: Submitting %s stage requests for %s." % ( len( pfnRepIDs ), storageElement ) )
res = self.replicaManager.prestageStorageFile( pfnRepIDs.keys(), storageElement, lifetime = self.pinLifetime )
gLogger.debug( "StageRequest.__issuePrestageRequests: replicaManager.prestageStorageFile: res=", res )
#res= {'OK': True, 'Value': {'Successful': {}, 'Failed': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/2010/RAW/EXPRESS/LHCb/COLLISION10/71476/071476_0000000241.raw': ' SRM2Storage.__gfal_exec: Failed to perform gfal_prestage.[SE][BringOnline][SRM_INVALID_REQUEST] httpg://srm-lhcb.cern.ch:8443/srm/managerv2: User not able to access specified space token\n'}}}
#res= {'OK': True, 'Value': {'Successful': {'srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63495/063495_0000000001.raw': '-2083846379'}, 'Failed': {}}}
if not res['OK']:
gLogger.error( "StageRequest.__issuePrestageRequests: Completely failed to sumbmit stage requests for replicas.", res['Message'] )
else:
for pfn, requestID in res['Value']['Successful'].items():
if not stageRequestMetadata.has_key( requestID ):
stageRequestMetadata[requestID] = []
stageRequestMetadata[requestID].append( pfnRepIDs[pfn] )
updatedPfnIDs.append( pfnRepIDs[pfn] )
if stageRequestMetadata:
gLogger.info( "StageRequest.__issuePrestageRequests: %s stage request metadata to be updated." % len( stageRequestMetadata ) )
res = self.storageDB.insertStageRequest( stageRequestMetadata, self.pinLifetime )
if not res['OK']:
gLogger.error( "StageRequest.__issuePrestageRequests: Failed to insert stage request metadata.", res['Message'] )
res = self.storageDB.updateReplicaStatus( updatedPfnIDs, 'StageSubmitted' )
if not res['OK']:
gLogger.error( "StageRequest.__issuePrestageRequests: Failed to insert replica status.", res['Message'] )
return
def __getWaitingReplicas( self ):
""" This obtains the Waiting replicas from the Replicas table and for each LFN the requested storage element """
# First obtain the Waiting replicas from the Replicas table
res = self.storageDB.getWaitingReplicas()
if not res['OK']:
gLogger.error( "StageRequest.__getWaitingReplicas: Failed to get replicas with Waiting status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageRequest.__getWaitingReplicas: No Waiting replicas found to process." )
return S_OK()
else:
gLogger.debug( "StageRequest.__getWaitingReplicas: Obtained %s Waiting replicas(s) to process." % len( res['Value'] ) )
seReplicas = {}
replicaIDs = {}
for replicaID, info in res['Value'].items():
lfn = info['LFN']
storageElement = info['SE']
size = info['Size']
pfn = info['PFN']
# lfn,storageElement,size,pfn = info
replicaIDs[replicaID] = {'LFN':lfn, 'PFN':pfn, 'Size':size, 'StorageElement':storageElement}
if not seReplicas.has_key( storageElement ):
seReplicas[storageElement] = []
seReplicas[storageElement].append( replicaID )
return S_OK( {'SEReplicas':seReplicas, 'ReplicaIDs':replicaIDs} )
def __checkIntegrity( self, storageElement, pfnRepIDs, replicaIDs ):
# Check the integrity of the files to ensure they are available
terminalReplicaIDs = {}
gLogger.info( "StageRequest.__checkIntegrity: Checking the integrity of %s replicas at %s." % ( len( pfnRepIDs ), storageElement ) )
res = self.replicaManager.getStorageFileMetadata( pfnRepIDs.keys(), storageElement )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Completely failed to obtain metadata for replicas.", res['Message'] )
return res
for pfn, metadata in res['Value']['Successful'].items():
if metadata['Cached']:
gLogger.info( "StageRequest.__checkIntegrity: Cache hit for file." )
if metadata['Size'] != replicaIDs[pfnRepIDs[pfn]]['Size']:
gLogger.error( "StageRequest.__checkIntegrity: PFN StorageElement size does not match FileCatalog", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN StorageElement size does not match FileCatalog'
pfnRepIDs.pop( pfn )
elif metadata['Lost']:
gLogger.error( "StageRequest.__checkIntegrity: PFN has been Lost by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN has been Lost by the StorageElement'
pfnRepIDs.pop( pfn )
elif metadata['Unavailable']:
gLogger.error( "StageRequest.__checkIntegrity: PFN is declared Unavailable by the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN is declared Unavailable by the StorageElement'
pfnRepIDs.pop( pfn )
for pfn, reason in res['Value']['Failed'].items():
if re.search( 'File does not exist', reason ):
gLogger.error( "StageRequest.__checkIntegrity: PFN does not exist in the StorageElement", pfn )
terminalReplicaIDs[pfnRepIDs[pfn]] = 'PFN does not exist in the StorageElement'
pfnRepIDs.pop( pfn )
# Update the states of the replicas in the database  # TODO: Send status to integrity DB
if terminalReplicaIDs:
gLogger.info( "StageRequest.__checkIntegrity: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.storageDB.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageRequest.__checkIntegrity: Failed to update replica failures.", res['Message'] )
return S_OK( pfnRepIDs )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()  # NOTE: this early return disables the reporting below; the setFileProblematic call is never reached
res = self.dataIntegrityClient.setFileProblematic( lfns, reason, self.name )
if not res['OK']:
gLogger.error( "RequestPreparation.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
return res
if res['Value']['Successful']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
if res['Value']['Failed']:
gLogger.info( "RequestPreparation.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
return res
Example 5: StageMonitorAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
oAccounting.setValuesFromDict( accountingDict )
oAccounting.setEndTime()
gDataStoreClient.addRegister( oAccounting )
# Update the states of the replicas in the database
if terminalReplicaIDs:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s replicas are terminally failed." % len( terminalReplicaIDs ) )
res = self.stagerClient.updateReplicaFailure( terminalReplicaIDs )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to update replica failures.", res['Message'] )
if stagedReplicas:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s staged replicas to be updated." % len( stagedReplicas ) )
res = self.stagerClient.setStageComplete( stagedReplicas )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to updated staged replicas.", res['Message'] )
res = self.stagerClient.updateReplicaStatus( stagedReplicas, 'Staged' )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to insert replica status.", res['Message'] )
if oldRequests:
gLogger.info( "StageMonitor.__monitorStorageElementStageRequests: %s old requests will be retried." % len( oldRequests ) )
res = self.__wakeupOldRequests( oldRequests )
if not res['OK']:
gLogger.error( "StageMonitor.__monitorStorageElementStageRequests: Failed to wakeup old requests.", res['Message'] )
return
def __newAccountingDict( self, storageElement ):
""" Generate a new accounting Dict """
accountingDict = {}
accountingDict['OperationType'] = 'Stage'
accountingDict['User'] = self.proxyInfoDict['username']
accountingDict['Protocol'] = 'Stager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = storageElement
accountingDict['Destination'] = storageElement
accountingDict['ExecutionSite'] = siteName()
accountingDict['TransferTotal'] = 0
accountingDict['TransferOK'] = 0
accountingDict['TransferSize'] = 0
accountingDict['TransferTime'] = self.am_getPollingTime()
return accountingDict
def __getStageSubmittedReplicas( self ):
""" This obtains the StageSubmitted replicas from the Replicas table and the RequestID from the StageRequests table """
res = self.stagerClient.getCacheReplicas( {'Status':'StageSubmitted'} )
if not res['OK']:
gLogger.error( "StageMonitor.__getStageSubmittedReplicas: Failed to get replicas with StageSubmitted status.", res['Message'] )
return res
if not res['Value']:
gLogger.debug( "StageMonitor.__getStageSubmittedReplicas: No StageSubmitted replicas found to process." )
return S_OK()
else:
gLogger.debug( "StageMonitor.__getStageSubmittedReplicas: Obtained %s StageSubmitted replicas(s) to process." % len( res['Value'] ) )
seReplicas = {}
replicaIDs = res['Value']
for replicaID, info in replicaIDs.items():
storageElement = info['SE']
if not seReplicas.has_key( storageElement ):
seReplicas[storageElement] = []
seReplicas[storageElement].append( replicaID )
# RequestID was missing from replicaIDs dictionary BUGGY?
res = self.stagerClient.getStageRequests( {'ReplicaID':replicaIDs.keys()} )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( 'Could not obtain request IDs for replicas %s from StageRequests table' % ( replicaIDs.keys() ) )
for replicaID, info in res['Value'].items():
reqID = info['RequestID']
replicaIDs[replicaID]['RequestID'] = reqID
return S_OK( {'SEReplicas':seReplicas, 'ReplicaIDs':replicaIDs} )
def __reportProblematicFiles( self, lfns, reason ):
return S_OK()  # NOTE: this early return disables the reporting below; the setFileProblematic call is never reached
res = self.dataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'StageMonitorAgent' )
if not res['OK']:
gLogger.error( "StageMonitor.__reportProblematicFiles: Failed to report missing files.", res['Message'] )
return res
if res['Value']['Successful']:
gLogger.info( "StageMonitor.__reportProblematicFiles: Successfully reported %s missing files." % len( res['Value']['Successful'] ) )
if res['Value']['Failed']:
gLogger.info( "StageMonitor.__reportProblematicFiles: Failed to report %s problematic files." % len( res['Value']['Failed'] ) )
return res
def __wakeupOldRequests( self, oldRequests ):
gLogger.info( "StageMonitor.__wakeupOldRequests: Attempting..." )
retryInterval = self.am_getOption( 'RetryIntervalHour', 2 )
res = self.stagerClient.wakeupOldRequests( oldRequests, retryInterval )
if not res['OK']:
gLogger.error( "StageMonitor.__wakeupOldRequests: Failed to resubmit old requests.", res['Message'] )
return res
return S_OK()
Example 6: MigrationMonitoringAgent
# Required import: from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient [as alias]
# Or: from DIRAC.DataManagementSystem.Client.DataIntegrityClient.DataIntegrityClient import setFileProblematic [as alias]
#......... part of the code is omitted here .........
if not res['OK']:
return res
files = res['Value']
pfnIDs = {}
if len( files.keys() ) > 0:
for fileID, metadataDict in files.items():
pfn = metadataDict['PFN']
pfnIDs[pfn] = fileID
return S_OK( {'PFNIDs':pfnIDs, 'Files':files} )
def __getCatalogFileMetadata( self, files ):
lfnFileID = {}
metadataToObtain = []
for fileID, metadata in files.items():
if not ( metadata['Size'] and metadata['Checksum'] ):
lfn = metadata['LFN']
metadataToObtain.append( lfn )
lfnFileID[lfn] = fileID
if not metadataToObtain:
return S_OK()
res = self.ReplicaManager.getCatalogFileMetadata( metadataToObtain )
if not res['OK']:
gLogger.error( "__getCatalogFileMetadata: Failed to obtain file metadata", res['Message'] )
return res
successful = res['Value']['Successful']
failed = res['Value']['Failed']
terminalIDs = []
problematicFiles = []
for lfn, error in failed.items():
gLogger.error( "__getCatalogFileMetadata: Failed to get file metadata", "%s %s" % ( lfn, error ) )
if re.search( "No such file or directory", error ):
fileID = lfnFileID[lfn]
lfn = files[fileID]['LFN']
pfn = files[fileID]['PFN']
se = files[fileID]['SE']
problematicFiles.append( lfn )
terminalIDs.append( fileID )
if terminalIDs:
self.__reportProblematicFiles( problematicFiles, 'LFNCatalogMissing' )
self.__setMigratingReplicaStatus( terminalIDs, 'Failed' )
fileMetadata = {}
for lfn, metadata in successful.items():
size = metadata['Size']
checksum = metadata['CheckSumValue']
fileMetadata[lfnFileID[lfn]] = {'Size':size, 'Checksum':checksum}
return S_OK( fileMetadata )
def __setMigratingReplicaStatus( self, fileIDs, status ):
gLogger.info( "__setMigratingReplicaStatus: Attempting to update %s files to '%s'" % ( len( fileIDs ), status ) )
res = self.MigrationMonitoringDB.setMigratingReplicaStatus( fileIDs, status )
if not res['OK']:
gLogger.info( "__setMigratingReplicaStatus: Failed to update status of files", res['Message'] )
else:
gLogger.info( "__setMigratingReplicaStatus: Successfully updated status of files" )
def __reportProblematicFiles( self, lfns, reason ):
gLogger.info( '__reportProblematicFiles: The following %s files were found with %s' % ( len( lfns ), reason ) )
for lfn in sortList( lfns ):
gLogger.info( lfn )
res = self.DataIntegrityClient.setFileProblematic( lfns, reason, sourceComponent = 'MigrationMonitoringAgent' )
if not res['OK']:
gLogger.info( '__reportProblematicFiles: Failed to update integrity DB with files', res['Message'] )
else:
gLogger.info( '__reportProblematicFiles: Successfully updated integrity DB with files' )
def __reportProblematicReplicas( self, replicaTuples ):
gLogger.info( '__reportProblematicReplicas: The following %s files being reported to integrity DB:' % ( len( replicaTuples ) ) )
for lfn, pfn, se, reason in sortList( replicaTuples ):
if lfn:
gLogger.info( lfn )
else:
gLogger.info( pfn )
res = self.DataIntegrityClient.setReplicaProblematic( replicaTuples, sourceComponent = 'MigrationMonitoringAgent' )
if not res['OK']:
gLogger.info( '__reportProblematicReplicas: Failed to update integrity DB with replicas', res['Message'] )
else:
gLogger.info( '__reportProblematicReplicas: Successfully updated integrity DB with replicas' )
def __initialiseAccountingObject( self, operation, se, startTime, endTime, size ):
accountingDict = {}
accountingDict['OperationType'] = operation
accountingDict['User'] = self.userName
accountingDict['Protocol'] = 'SRM'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['TransferTotal'] = 1
accountingDict['TransferOK'] = 1
accountingDict['TransferSize'] = size
timeDiff = endTime - startTime
transferTime = ( timeDiff.days * 86400 ) + ( timeDiff.seconds ) + ( timeDiff.microseconds / 1000000.0 )
accountingDict['TransferTime'] = transferTime
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = siteName()
accountingDict['Destination'] = se
oDataOperation = DataOperation()
oDataOperation.setEndTime( endTime )
oDataOperation.setStartTime( startTime )
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
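MigrationMonitoringAgent is the only example above whose reporting is actually active, and it uses both reporting calls of the integrity client: setFileProblematic for whole LFNs (reason 'LFNCatalogMissing') and setReplicaProblematic for individual (LFN, PFN, SE, reason) tuples. A hedged, self-contained sketch of the two calls side by side follows; all LFNs, PFNs, the SE name and the replica reason are placeholders.
from DIRAC import gLogger
from DIRAC.DataManagementSystem.Client.DataIntegrityClient import DataIntegrityClient

integrityClient = DataIntegrityClient()

# File-level report: the LFN is missing from the catalogue.
lfns = [ '/lhcb/user/s/someuser/missing-file.raw' ]
res = integrityClient.setFileProblematic( lfns, 'LFNCatalogMissing', sourceComponent = 'MigrationMonitoringAgent' )
if not res['OK']:
  gLogger.error( 'Failed to update integrity DB with files', res['Message'] )

# Replica-level report: one (LFN, PFN, SE, reason) tuple per problematic replica.
replicaTuples = [ ( '/lhcb/user/s/someuser/bad-replica.raw',
                    'srm://example-se.cern.ch/lhcb/user/s/someuser/bad-replica.raw',
                    'CERN-RAW',
                    'MigrationFailed' ) ]
res = integrityClient.setReplicaProblematic( replicaTuples, sourceComponent = 'MigrationMonitoringAgent' )
if not res['OK']:
  gLogger.error( 'Failed to update integrity DB with replicas', res['Message'] )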