本文整理汇总了Python中DIRAC.DataManagementSystem.Client.DataManager.DataManager.replicateAndRegister方法的典型用法代码示例。如果您正苦于以下问题:Python DataManager.replicateAndRegister方法的具体用法?Python DataManager.replicateAndRegister怎么用?Python DataManager.replicateAndRegister使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DIRAC.DataManagementSystem.Client.DataManager.DataManager的用法示例。
在下文中一共展示了DataManager.replicateAndRegister方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _replicate
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
def _replicate(self, lfn, destinationSE, sourceSE="", localCache=""):
    """Replicate *lfn* to *destinationSE* and register the new replica.

    On success, the per-LFN entry of the DataManager result is returned
    wrapped in S_OK; on failure, the error message is printed and the
    failed result structure is returned unchanged.
    """
    dataManager = DataManager()
    result = dataManager.replicateAndRegister(lfn, destinationSE, sourceSE, '', localCache)
    if result['OK']:
        return S_OK(result['Value']['Successful'][lfn])
    print('ERROR %s' % (result['Message']))
    return result
示例2: ReplicaManagerTestCase
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
class ReplicaManagerTestCase(unittest.TestCase):
""" Base class for the Replica Manager test cases
"""
def setUp(self):
    """Create the DataManager under test and a small local file to upload.

    The file content is the current timestamp so every run uploads
    unique data.
    """
    self.dataManager = DataManager()
    self.fileName = '/tmp/temporaryLocalFile'
    # Use a context manager so the handle is closed even if write fails;
    # also avoids shadowing the builtin name `file`.
    with open(self.fileName, 'w') as localFile:
        localFile.write("%s" % time.time())
def test_putAndRegister(self):
    """Upload the local file with putAndRegister, then remove it again.

    Both operations must report the LFN under their 'Successful' result
    dictionary.
    """
    print('\n\n#########################################################################\n\n\t\t\tPut and register test\n')
    # Unique LFN per run so repeated test runs do not collide in the catalog.
    lfn = '/lhcb/test/unit-test/ReplicaManager/putAndRegister/testFile.%s' % time.time()
    diracSE = 'GRIDKA-RAW'
    putRes = self.dataManager.putAndRegister(lfn, self.fileName, diracSE)
    removeRes = self.dataManager.removeFile(lfn)
    # Check that the put was successful
    # (assertTrue / `in` replace the deprecated assert_ / dict.has_key)
    self.assertTrue(putRes['OK'])
    self.assertTrue('Successful' in putRes['Value'])
    self.assertTrue(lfn in putRes['Value']['Successful'])
    self.assertTrue(putRes['Value']['Successful'][lfn])
    # Check that the removal was successful
    self.assertTrue(removeRes['OK'])
    self.assertTrue('Successful' in removeRes['Value'])
    self.assertTrue(lfn in removeRes['Value']['Successful'])
    self.assertTrue(removeRes['Value']['Successful'][lfn])
def test_putAndRegisterReplicate(self):
    """Upload a file, replicate it to a second SE, then remove it.

    put, replicateAndRegister and removeFile must each report the LFN
    under their 'Successful' result dictionary.
    """
    print('\n\n#########################################################################\n\n\t\t\tReplication test\n')
    # Unique LFN per run so repeated test runs do not collide in the catalog.
    lfn = '/lhcb/test/unit-test/ReplicaManager/putAndRegisterReplicate/testFile.%s' % time.time()
    diracSE = 'GRIDKA-RAW'
    putRes = self.dataManager.putAndRegister(lfn, self.fileName, diracSE)
    replicateRes = self.dataManager.replicateAndRegister(lfn, 'CNAF-DST')  # ,sourceSE='',destPath='',localCache='')
    removeRes = self.dataManager.removeFile(lfn)
    # Check that the put was successful
    # (assertTrue / `in` replace the deprecated assert_ / dict.has_key)
    self.assertTrue(putRes['OK'])
    self.assertTrue('Successful' in putRes['Value'])
    self.assertTrue(lfn in putRes['Value']['Successful'])
    self.assertTrue(putRes['Value']['Successful'][lfn])
    # Check that the replicate was successful
    self.assertTrue(replicateRes['OK'])
    self.assertTrue('Successful' in replicateRes['Value'])
    self.assertTrue(lfn in replicateRes['Value']['Successful'])
    self.assertTrue(replicateRes['Value']['Successful'][lfn])
    # Check that the removal was successful
    self.assertTrue(removeRes['OK'])
    self.assertTrue('Successful' in removeRes['Value'])
    self.assertTrue(lfn in removeRes['Value']['Successful'])
    self.assertTrue(removeRes['Value']['Successful'][lfn])
def test_putAndRegisterGetReplicaMetadata(self):
    """Upload a file, query its replica metadata, then remove it.

    The metadata dictionary for the LFN must expose the 'Cached',
    'Migrated' and 'Size' keys.
    """
    print('\n\n#########################################################################\n\n\t\t\tGet metadata test\n')
    # Unique LFN per run so repeated test runs do not collide in the catalog.
    lfn = '/lhcb/test/unit-test/ReplicaManager/putAndRegisterGetReplicaMetadata/testFile.%s' % time.time()
    diracSE = 'GRIDKA-RAW'
    putRes = self.dataManager.putAndRegister(lfn, self.fileName, diracSE)
    metadataRes = self.dataManager.getReplicaMetadata(lfn, diracSE)
    removeRes = self.dataManager.removeFile(lfn)
    # Check that the put was successful
    # (assertTrue / `in` replace the deprecated assert_ / dict.has_key)
    self.assertTrue(putRes['OK'])
    self.assertTrue('Successful' in putRes['Value'])
    self.assertTrue(lfn in putRes['Value']['Successful'])
    self.assertTrue(putRes['Value']['Successful'][lfn])
    # Check that the metadata query was successful
    self.assertTrue(metadataRes['OK'])
    self.assertTrue('Successful' in metadataRes['Value'])
    self.assertTrue(lfn in metadataRes['Value']['Successful'])
    self.assertTrue(metadataRes['Value']['Successful'][lfn])
    metadataDict = metadataRes['Value']['Successful'][lfn]
    self.assertTrue('Cached' in metadataDict)
    self.assertTrue('Migrated' in metadataDict)
    self.assertTrue('Size' in metadataDict)
    # Check that the removal was successful
    self.assertTrue(removeRes['OK'])
    self.assertTrue('Successful' in removeRes['Value'])
    self.assertTrue(lfn in removeRes['Value']['Successful'])
    self.assertTrue(removeRes['Value']['Successful'][lfn])
def test_putAndRegsiterGetAccessUrl(self):
print '\n\n#########################################################################\n\n\t\t\tGet Access Url test\n'
lfn = '/lhcb/test/unit-test/ReplicaManager/putAndRegisterGetAccessUrl/testFile.%s' % time.time()
diracSE = 'GRIDKA-RAW'
putRes = self.dataManager.putAndRegister(lfn, self.fileName, diracSE)
getAccessUrlRes = self.dataManager.getReplicaAccessUrl(lfn,diracSE)
print getAccessUrlRes
removeRes = self.dataManager.removeFile(lfn)
# Check that the put was successful
self.assert_(putRes['OK'])
self.assert_(putRes['Value'].has_key('Successful'))
self.assert_(putRes['Value']['Successful'].has_key(lfn))
self.assert_(putRes['Value']['Successful'][lfn])
# Check that the access url was successful
self.assert_(getAccessUrlRes['OK'])
self.assert_(getAccessUrlRes['Value'].has_key('Successful'))
self.assert_(getAccessUrlRes['Value']['Successful'].has_key(lfn))
self.assert_(getAccessUrlRes['Value']['Successful'][lfn])
#.........这里部分代码省略.........
示例3: DataIntegrityClient
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
#.........这里部分代码省略.........
if not res['Value']:
gLogger.info( "PFNMissing file (%d) no longer exists in catalog" % fileID )
return self.__updateCompletedFiles( 'PFNMissing', fileID )
res = Utils.executeSingleFileOrDirWrapper( StorageElement( se ).exists( pfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
if res['Value']:
gLogger.info( "PFNMissing replica (%d) is no longer missing" % fileID )
return self.__updateReplicaToChecked( problematicDict )
gLogger.info( "PFNMissing replica (%d) does not exist" % fileID )
res = Utils.executeSingleFileOrDirWrapper( self.fc.getReplicas( lfn, allStatus = True ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
replicas = res['Value']
seSite = se.split( '_' )[0].split( '-' )[0]
found = False
print replicas
for replicaSE in replicas.keys():
if re.search( seSite, replicaSE ):
found = True
problematicDict['SE'] = replicaSE
se = replicaSE
if not found:
gLogger.info( "PFNMissing replica (%d) is no longer registered at SE. Resolved." % fileID )
return self.__updateCompletedFiles( 'PFNMissing', fileID )
gLogger.info( "PFNMissing replica (%d) does not exist. Removing from catalog..." % fileID )
res = Utils.executeSingleFileOrDirWrapper( self.fc.removeReplica( {lfn:problematicDict} ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
if len( replicas ) == 1:
gLogger.info( "PFNMissing replica (%d) had a single replica. Updating prognosis" % fileID )
return self.changeProblematicPrognosis( fileID, 'LFNZeroReplicas' )
res = self.dm.replicateAndRegister( problematicDict['LFN'], se )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
# If we get here the problem is solved so we can update the integrityDB
return self.__updateCompletedFiles( 'PFNMissing', fileID )
def resolvePFNUnavailable( self, problematicDict ):
    """Resolve the PFNUnavailable prognosis for one problematic replica.

    Reads 'PFN', 'SE' and 'FileID' from *problematicDict* and, depending
    on what the storage element reports, either changes the prognosis,
    increments the retry counter, or marks the replica as checked in the
    catalog.
    """
    pfn = problematicDict['PFN']
    storageElementName = problematicDict['SE']
    fileID = problematicDict['FileID']
    res = Utils.executeSingleFileOrDirWrapper( StorageElement( storageElementName ).getFileMetadata( pfn ) )
    if not res['OK']:
        if re.search( 'File does not exist', res['Message'] ):
            # Not merely Unavailable: the replica has disappeared altogether.
            gLogger.info( "PFNUnavailable replica (%d) found to be missing. Updating prognosis" % fileID )
            return self.changeProblematicPrognosis( fileID, 'PFNMissing' )
        # Metadata query failed for some other reason -- try again later.
        gLogger.info( "PFNUnavailable replica (%d) found to still be Unavailable" % fileID )
        return self.incrementProblematicRetry( fileID )
    metadata = res['Value']
    if metadata['Unavailable']:
        gLogger.info( "PFNUnavailable replica (%d) found to still be Unavailable" % fileID )
        return self.incrementProblematicRetry( fileID )
    if metadata['Lost']:
        gLogger.info( "PFNUnavailable replica (%d) is now found to be Lost. Updating prognosis" % fileID )
        return self.changeProblematicPrognosis( fileID, 'PFNLost' )
    gLogger.info( "PFNUnavailable replica (%d) is no longer Unavailable" % fileID )
    # Need to make the replica okay in the Catalog
    return self.__updateReplicaToChecked( problematicDict )
def resolvePFNZeroSize( self, problematicDict ):
""" This takes the problematic dictionary returned by the integrity DB and resolves the PFNZeroSize prognosis
"""
pfn = problematicDict['PFN']
seName = problematicDict['SE']
示例4: execute
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
#.........这里部分代码省略.........
return resultOLfn
userOutputLFNs = resultOLfn['Value']
self.log.verbose('Calling getCandidateFiles( %s, %s, %s)' % (outputList, userOutputLFNs, self.outputDataFileMask))
self.log.debug("IgnoreAppErrors? '%s' " % self.ignoreapperrors)
resultCF = self.getCandidateFiles(outputList, userOutputLFNs, self.outputDataFileMask)
if not resultCF['OK']:
if not self.ignoreapperrors:
self.log.error(resultCF['Message'])
self.setApplicationStatus(resultCF['Message'])
return S_OK()
fileDict = resultCF['Value']
resultFMD = self.getFileMetadata(fileDict)
if not resultFMD['OK']:
if not self.ignoreapperrors:
self.log.error(resultFMD['Message'])
self.setApplicationStatus(resultFMD['Message'])
return S_OK()
if not resultFMD['Value']:
if not self.ignoreapperrors:
self.log.info('No output data files were determined to be uploaded for this workflow')
self.setApplicationStatus('No Output Data Files To Upload')
return S_OK()
fileMetadata = resultFMD['Value']
#First get the local (or assigned) SE to try first for upload and others in random fashion
resultSEL = getDestinationSEList('Tier1-USER', DIRAC.siteName(), outputmode='local')
if not resultSEL['OK']:
self.log.error('Could not resolve output data SE', resultSEL['Message'])
self.setApplicationStatus('Failed To Resolve OutputSE')
return resultSEL
localSE = resultSEL['Value']
orderedSEs = [ se for se in self.defaultOutputSE if se not in localSE and se not in self.userOutputSE]
orderedSEs = localSE + List.randomize(orderedSEs)
if self.userOutputSE:
prependSEs = []
for userSE in self.userOutputSE:
if not userSE in orderedSEs:
prependSEs.append(userSE)
orderedSEs = prependSEs + orderedSEs
self.log.info('Ordered list of output SEs is: %s' % (', '.join(orderedSEs)))
final = {}
for fileName, metadata in fileMetadata.iteritems():
final[fileName] = metadata
final[fileName]['resolvedSE'] = orderedSEs
#At this point can exit and see exactly what the module will upload
self.printOutputInfo(final)
if not self.enable:
return S_OK('Module is disabled by control flag')
#Instantiate the failover transfer client with the global request object
failoverTransfer = FailoverTransfer(self._getRequestContainer())
#One by one upload the files with failover if necessary
filesToReplicate = {}
filesToFailover = {}
filesUploaded = []
if not self.failoverTest:
self.transferAndRegisterFiles(final, failoverTransfer, filesToFailover, filesUploaded, filesToReplicate)
else:
filesToFailover = final
##if there are files to be failovered, we do it now
resultTRFF = self.transferRegisterAndFailoverFiles(failoverTransfer, filesToFailover, filesUploaded)
cleanUp = resultTRFF['Value']['cleanUp']
#For files correctly uploaded must report LFNs to job parameters
if filesUploaded:
report = ', '.join( filesUploaded )
self.jobReport.setJobParameter( 'UploadedOutputData', report )
self.workflow_commons['Request'] = failoverTransfer.request
#If some or all of the files failed to be saved to failover
if cleanUp:
#Leave any uploaded files just in case it is useful for the user
#do not try to replicate any files.
return S_ERROR('Failed To Upload Output Data')
#If there is now at least one replica for uploaded files can trigger replication
datMan = DataManager( catalogs = self.userFileCatalog )
self.log.info('Sleeping for 10 seconds before attempting replication of recently uploaded files')
time.sleep(10)
for lfn, repSE in filesToReplicate.items():
resultRAR = datMan.replicateAndRegister(lfn, repSE)
if not resultRAR['OK']:
self.log.info('Replication failed with below error but file already exists in Grid storage with \
at least one replica:\n%s' % (resultRAR))
self.generateFailoverFile()
self.setApplicationStatus('Job Finished Successfully')
return S_OK('Output data uploaded')
示例5: RecursiveCp
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
class RecursiveCp(object):
    """Recursively replicate every file under a catalog directory from one
    storage element to another, keeping simple transfer statistics.
    """

    def __init__(self):
        self.__rpcclient = RPCClient( "DataManagement/FileCatalog" )
        self.__dm = DataManager()
        self.__n_files = 0        # files replicated (or that would be, in dry-run)
        self.__space_copied = 0   # bytes replicated, summed from catalog metadata
        self.__badfiles = set()   # LFNs that failed even after the retry

    def search_directory(self, directory_path, source_se, dest_se, dry_run=True):
        """Recurse through *directory_path*, replicating every file found.

        Subdirectories are processed depth-first; per-file statistics are
        accumulated for print_stats().
        """
        print("Searching directory: %s" % directory_path)
        dir_content = self.__rpcclient.listDirectory(directory_path, False)
        if not dir_content["OK"]:
            print("Failed to contact DIRAC server for %s" % directory_path)
            return
        if directory_path in dir_content['Value']['Failed']:
            print("Could not access %s, maybe it doesn't exist?" % directory_path)
            return
        subdirs = dir_content['Value']['Successful'][directory_path]['SubDirs']
        for subdir in subdirs:
            self.search_directory(subdir, source_se, dest_se, dry_run)
        # Now do files...
        files = dir_content['Value']['Successful'][directory_path]['Files']
        for filename in files:
            fullpath = os.path.join(directory_path, filename)
            if self.copy_file(fullpath, source_se, dest_se, dry_run):
                self.__n_files += 1
                self.__space_copied += files[filename]['MetaData']['Size']

    def copy_file(self, filename, source_se, dest_se, dry_run=True):
        """Replicate one file from *source_se* to *dest_se* and register it
        in the DIRAC file catalogue.

        Returns True when the file was (or, in dry-run, would be) copied;
        False when it is absent at the source, already at the destination,
        or replication failed twice.
        """
        res = self.__rpcclient.getReplicas(filename, False)
        if not res["OK"]:
            print("Could not get replica status for %s" % filename)
            return False
        ses = res['Value']['Successful'][filename].keys()
        if source_se not in ses:
            # print "File %s not at source SE" % filename
            return False
        if dest_se in ses:
            # file already exists on destination SE
            return False
        print("%s" % filename)
        if not dry_run:
            res = self.__dm.replicateAndRegister(filename, dest_se, source_se)
            if not res['OK']:
                print("Replicate and register failed for: %s" % filename)
                print(res)
                # Brief pause, then retry once with debug logging enabled
                # so the failure reason is visible.
                sleep(5)
                print("Trying again to register %s" % filename)
                gLogger.setLevel( "DEBUG" )
                gLogger.showHeaders( True )
                res = self.__dm.replicateAndRegister(filename, dest_se, source_se)
                gLogger.setLevel( "INFO" )
                gLogger.showHeaders( False )
                if not res['OK']:
                    print("Replicate and register failed again for: %s" % filename)
                    self.__badfiles.add(filename)
                    return False
        return True

    def print_stats(self):
        """Print a summary of the copy run and dump failed LFNs to badfiles.txt."""
        print("")
        print("Number of files copied: %s" % self.__n_files)
        space = self.__space_copied / (1024.0 * 1024.0 * 1024.0)
        print("Data copied: %0.3f GB" % space)
        print("Number of failed copies: %s" % len(self.__badfiles))
        # Context manager guarantees the file is flushed and closed.
        with open("badfiles.txt", "w") as badfile:
            badfile.write('\n'.join(self.__badfiles))
            badfile.write('\n')
示例6: DataIntegrityClient
# 需要导入模块: from DIRAC.DataManagementSystem.Client.DataManager import DataManager [as 别名]
# 或者: from DIRAC.DataManagementSystem.Client.DataManager.DataManager import replicateAndRegister [as 别名]
#.........这里部分代码省略.........
if not res['Value']:
gLogger.info( "PFNMissing file (%d) no longer exists in catalog" % fileID )
return self.__updateCompletedFiles( 'PFNMissing', fileID )
res = returnSingleResult( StorageElement( se ).exists( lfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
if res['Value']:
gLogger.info( "PFNMissing replica (%d) is no longer missing" % fileID )
return self.__updateReplicaToChecked( problematicDict )
gLogger.info( "PFNMissing replica (%d) does not exist" % fileID )
res = returnSingleResult( self.fc.getReplicas( lfn, allStatus = True ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
replicas = res['Value']
seSite = se.split( '_' )[0].split( '-' )[0]
found = False
print replicas
for replicaSE in replicas.keys():
if re.search( seSite, replicaSE ):
found = True
problematicDict['SE'] = replicaSE
se = replicaSE
if not found:
gLogger.info( "PFNMissing replica (%d) is no longer registered at SE. Resolved." % fileID )
return self.__updateCompletedFiles( 'PFNMissing', fileID )
gLogger.info( "PFNMissing replica (%d) does not exist. Removing from catalog..." % fileID )
res = returnSingleResult( self.fc.removeReplica( {lfn:problematicDict} ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
if len( replicas ) == 1:
gLogger.info( "PFNMissing replica (%d) had a single replica. Updating prognosis" % fileID )
return self.changeProblematicPrognosis( fileID, 'LFNZeroReplicas' )
res = self.dm.replicateAndRegister( problematicDict['LFN'], se )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
# If we get here the problem is solved so we can update the integrityDB
return self.__updateCompletedFiles( 'PFNMissing', fileID )
#FIXME: Unused?
def resolvePFNUnavailable( self, problematicDict ):
    """ This takes the problematic dictionary returned by the integrity DB and resolves the PFNUnavailable prognosis

    :param dict problematicDict: integrity-DB record; the keys 'LFN',
        'SE' and 'FileID' are read here.
    :return: result of the delegated resolution step (prognosis change,
        retry increment, or catalog status update)
    """
    lfn = problematicDict['LFN']
    se = problematicDict['SE']
    fileID = problematicDict['FileID']
    # Ask the storage element directly for the replica's metadata.
    res = returnSingleResult( StorageElement( se ).getFileMetadata( lfn ) )
    if ( not res['OK'] ) and ( re.search( 'File does not exist', res['Message'] ) ):
        # The file is no longer Unavailable but has now disappeared completely
        gLogger.info( "PFNUnavailable replica (%d) found to be missing. Updating prognosis" % fileID )
        return self.changeProblematicPrognosis( fileID, 'PFNMissing' )
    if ( not res['OK'] ) or res['Value']['Unavailable']:
        # Query failed for another reason, or the SE still reports the
        # replica Unavailable: keep the prognosis and bump the retry count.
        gLogger.info( "PFNUnavailable replica (%d) found to still be Unavailable" % fileID )
        return self.incrementProblematicRetry( fileID )
    if res['Value']['Lost']:
        gLogger.info( "PFNUnavailable replica (%d) is now found to be Lost. Updating prognosis" % fileID )
        return self.changeProblematicPrognosis( fileID, 'PFNLost' )
    gLogger.info( "PFNUnavailable replica (%d) is no longer Unavailable" % fileID )
    # Need to make the replica okay in the Catalog
    return self.__updateReplicaToChecked( problematicDict )
#FIXME: Unused?
def resolvePFNZeroSize( self, problematicDict ):
""" This takes the problematic dictionary returned by the integrity DB and resolves the PFNZeroSize prognosis
"""