This article collects typical usage examples of the Python method rucio.client.Client.add_replication_rule. If you have been wondering how exactly to use Client.add_replication_rule in Python, the curated examples below may help. You can also explore the containing class, rucio.client.Client, in more depth.
The following shows 5 code examples of Client.add_replication_rule, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
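Before the examples, here is a minimal sketch of the call itself. The scope, dataset name, and RSE expression below are placeholders for illustration, not values taken from the examples:

from rucio.client import Client

client = Client()
# one DID (data identifier) entry per dataset the rule should cover
dids = [{'scope': 'user.jdoe', 'name': 'user.jdoe.test.dataset'}]
# keep one replica wherever the RSE expression matches; lifetime is in
# seconds, after which the rule expires
rule_ids = client.add_replication_rule(dids=dids,
                                       copies=1,
                                       rse_expression='MOCK_RSE',
                                       lifetime=24 * 60 * 60)
print(rule_ids)  # a list of rule IDs, one per DID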
Example 1: registerDatasetLocation
# Required import: from rucio.client import Client [as alias]
# Or: from rucio.client.Client import add_replication_rule [as alias]
def registerDatasetLocation(self, dsn, rses, lifetime=None, owner=None):
    if lifetime is not None:
        lifetime = lifetime * 24 * 60 * 60
    scope, dsn = self.extract_scope(dsn)
    dids = []
    did = {'scope': scope, 'name': dsn}
    dids.append(did)
    # build the location expression from the sorted RSE list
    rses.sort()
    location = '|'.join(rses)
    # check whether a replication rule already exists
    client = RucioClient()
    # the owner defaults to the client account
    if owner is None:
        owner = client.account
    for rule in client.list_did_rules(scope=scope, name=dsn):
        if (rule['rse_expression'] == location) and (rule['account'] == client.account):
            return True
    try:
        client.add_replication_rule(dids=dids, copies=1, rse_expression=location, weight=None,
                                    lifetime=lifetime, grouping='DATASET', account=owner,
                                    locked=False, notify='N', ignore_availability=True)
    except Duplicate:
        pass
    return True
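The snippet assumes RucioClient is an alias for rucio.client.Client and that the caught exceptions come from rucio.common.exception; a plausible import block for this and the following examples (the alias is this code's own convention, not part of Rucio):

from rucio.client import Client as RucioClient
from rucio.common.exception import (Duplicate, DuplicateRule,
                                    FileAlreadyExists,
                                    DataIdentifierAlreadyExists)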
Example 2: registerFiles
# Required import: from rucio.client import Client [as alias]
# Or: from rucio.client.Client import add_replication_rule [as alias]
def registerFiles(self, files, rse):
    client = RucioClient()
    try:
        # register the replicas at the RSE
        client.add_replicas(files=files, rse=rse)
    except FileAlreadyExists:
        pass
    try:
        # add a replication rule for the files
        client.add_replication_rule(files, copies=1, rse_expression=rse)
    except DuplicateRule:
        pass
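For reference, each entry in the files list passed to add_replicas describes one physical replica. A hedged sketch of the layout this example presumably receives (all values are placeholders; fields such as pfn, md5, and meta are optional):

files = [{'scope': 'user.jdoe',
          'name': 'file.root',
          'bytes': 1024,
          'adler32': '0cc737eb'}]
client.add_replicas(files=files, rse='MOCK_RSE')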
Example 3: trigger_stage_out
# Required import: from rucio.client import Client [as alias]
# Or: from rucio.client.Client import add_replication_rule [as alias]
#......... part of the code is omitted here .........
        return None, errStr
    # print out the file list
    tmpLog.debug('fileList - {0}'.format(fileList))
    # create the dataset, add files to it, and create a transfer rule
    try:
        # register the dataset
        rucioAPI = RucioClient()
        tmpLog.debug('register {0}:{1} rse = {2} meta=(hidden: True) lifetime = {3}'
                     .format(datasetScope, datasetName, srcRSE, (30 * 24 * 60 * 60)))
        try:
            rucioAPI.add_dataset(datasetScope, datasetName,
                                 meta={'hidden': True},
                                 lifetime=30 * 24 * 60 * 60,
                                 rse=srcRSE)
        except DataIdentifierAlreadyExists:
            # ignore if the dataset already exists
            pass
        except Exception:
            errMsg = 'Could not create dataset {0}:{1} srcRSE - {2}'.format(datasetScope,
                                                                            datasetName,
                                                                            srcRSE)
            core_utils.dump_error_message(tmpLog)
            tmpLog.error(errMsg)
            return None, errMsg
        # add files to the dataset, 500 at a time
        numfiles = len(fileList)
        maxfiles = 500
        # integer (floor) division; a bare "/" would produce a float under Python 3
        numslices = numfiles // maxfiles
        if (numfiles % maxfiles) > 0:
            numslices = numslices + 1
        start = 0
        for i in range(numslices):
            try:
                stop = start + maxfiles
                if stop > numfiles:
                    stop = numfiles
                rucioAPI.add_files_to_datasets([{'scope': datasetScope,
                                                 'name': datasetName,
                                                 'dids': fileList[start:stop],
                                                 'rse': srcRSE}],
                                               ignore_duplicate=True)
                start = stop
            except FileAlreadyExists:
                # ignore if the files already exist
                pass
            except Exception:
                errMsg = 'Could not add files to DS - {0}:{1} rse - {2} files - {3}'.format(datasetScope,
                                                                                            datasetName,
                                                                                            srcRSE,
                                                                                            fileList)
                core_utils.dump_error_message(tmpLog)
                tmpLog.error(errMsg)
                return None, errMsg
        # add a replication rule
        try:
            tmpDID = dict()
            tmpDID['scope'] = datasetScope
            tmpDID['name'] = datasetName
            tmpRet = rucioAPI.add_replication_rule([tmpDID], 1, dstRSE,
                                                   lifetime=30 * 24 * 60 * 60)
            ruleIDs = tmpRet[0]
            tmpLog.debug('registered dataset {0}:{1} with rule {2}'.format(datasetScope, datasetName,
                                                                           str(ruleIDs)))
            # group the output files together by the Rucio transfer rule
            jobspec.set_groups_to_files({ruleIDs: {'lfns': lfns, 'groupStatus': 'pending'}})
            msgStr = 'jobspec.set_groups_to_files - Rucio rule - {0}, lfns - {1}, groupStatus - pending'.format(ruleIDs, lfns)
            tmpLog.debug(msgStr)
            tmpLog.debug('call self.dbInterface.set_file_group(jobspec.get_output_file_specs(skip_done=True),ruleIDs,transferring)')
            tmpStat = self.dbInterface.set_file_group(jobspec.get_output_file_specs(skip_done=True), ruleIDs, 'transferring')
            tmpLog.debug('called self.dbInterface.set_file_group(jobspec.get_output_file_specs(skip_done=True),ruleIDs,transferring)')
            tmpStat = True
            tmpMsg = 'created Rucio rule successfully'
        except DuplicateRule:
            # ignore a duplicated rule
            tmpLog.debug('rule is already available')
        except Exception:
            errMsg = 'Error creating rule for dataset {0}:{1}'.format(datasetScope, datasetName)
            core_utils.dump_error_message(tmpLog)
            tmpLog.debug(errMsg)
            return None, errMsg
        # update the file group status
        self.dbInterface.update_file_group_status(ruleIDs, 'transferring')
    except Exception:
        core_utils.dump_error_message(tmpLog)
        # treat this as a temporary error
        tmpStat = None
        tmpMsg = 'failed to add a rule for {0}:{1}'.format(datasetScope, datasetName)
    # now check for any copy errors
    if errors:
        for error in errors:
            tmpLog.debug('copy error source {0} destination {1} Reason {2}'.format(error[0], error[1], error[2]))
        raise Error(errors)
    # otherwise we are OK
    tmpLog.debug('stop')
    return tmpStat, tmpMsg
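A side note on the 500-file batching above: the slice count is computed with a division plus a remainder check. An equivalent formulation, shown here only as a sketch, uses ceiling division instead:

maxfiles = 500
numfiles = len(fileList)
# ceiling division: one extra slice whenever there is a remainder
numslices = (numfiles + maxfiles - 1) // maxfiles
for i in range(numslices):
    batch = fileList[i * maxfiles:(i + 1) * maxfiles]
    # each batch would go to rucioAPI.add_files_to_datasets(...) as above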
Example 4: trigger_stage_out
# Required import: from rucio.client import Client [as alias]
# Or: from rucio.client.Client import add_replication_rule [as alias]
def trigger_stage_out(self, jobspec):
    # make logger
    tmpLog = self.make_logger(baseLogger, 'PandaID={0}'.format(jobspec.PandaID),
                              method_name='trigger_stage_out')
    tmpLog.debug('start')
    # loop over all files
    files = dict()
    transferIDs = dict()
    transferDatasets = dict()
    fileAttrs = jobspec.get_output_file_attributes()
    for fileSpec in jobspec.outFiles:
        # skip zipped files
        if fileSpec.zipFileID is not None:
            continue
        # skip files that have already been processed
        if 'transferDataset' in fileSpec.fileAttributes:
            if fileSpec.fileType not in transferDatasets:
                transferDatasets[fileSpec.fileType] = fileSpec.fileAttributes['transferDataset']
            if fileSpec.fileType not in transferIDs:
                transferIDs[fileSpec.fileType] = fileSpec.fileAttributes['transferID']
            continue
        # set the object store ID ("in", not "==": comparing a string to a
        # list is always False)
        if fileSpec.fileType in ['es_output', 'zip_output']:
            fileSpec.objstoreID = self.objStoreID_ES
        # make the path where the file is copied for transfer
        if fileSpec.fileType != 'zip_output':
            scope = fileAttrs[fileSpec.lfn]['scope']
            datasetName = fileAttrs[fileSpec.lfn]['dataset']
        else:
            # use the panda scope for zipped files
            scope = self.scopeForTmp
            datasetName = 'dummy'
        srcPath = fileSpec.path
        dstPath = mover_utils.construct_file_path(self.srcBasePath, scope, fileSpec.lfn)
        # remove any stale destination file
        if os.path.exists(dstPath):
            os.remove(dstPath)
        # copy
        tmpLog.debug('copy src={srcPath} dst={dstPath}'.format(srcPath=srcPath, dstPath=dstPath))
        dstDir = os.path.dirname(dstPath)
        if not os.path.exists(dstDir):
            os.makedirs(dstDir)
        shutil.copyfile(srcPath, dstPath)
        # collect files
        tmpFile = dict()
        tmpFile['scope'] = scope
        tmpFile['name'] = fileSpec.lfn
        tmpFile['bytes'] = fileSpec.fsize
        if fileSpec.fileType not in files:
            files[fileSpec.fileType] = []
        files[fileSpec.fileType].append(tmpFile)
    # loop over all file types to be registered in Rucio
    rucioAPI = RucioClient()
    for fileType, fileList in iteritems(files):
        # set the destination RSE
        if fileType in ['es_output', 'zip_output']:
            dstRSE = self.dstRSE_ES
        elif fileType == 'output':
            dstRSE = self.dstRSE_Out
        elif fileType == 'log':
            dstRSE = self.dstRSE_Log
        else:
            errMsg = 'unsupported file type {0}'.format(fileType)
            tmpLog.error(errMsg)
            return (False, errMsg)
        # skip if the destination is None
        if dstRSE is None:
            continue
        # make the dataset if it is missing
        if fileType not in transferDatasets:
            try:
                tmpScope = self.scopeForTmp
                tmpDS = 'panda.harvester_stage_out.{0}'.format(str(uuid.uuid4()))
                rucioAPI.add_dataset(tmpScope, tmpDS,
                                     meta={'hidden': True},
                                     lifetime=30 * 24 * 60 * 60,
                                     files=fileList,
                                     rse=self.srcRSE)
                transferDatasets[fileType] = tmpDS
                # add a replication rule
                tmpDID = dict()
                tmpDID['scope'] = tmpScope
                tmpDID['name'] = tmpDS
                tmpRet = rucioAPI.add_replication_rule([tmpDID], 1, dstRSE,
                                                       lifetime=30 * 24 * 60 * 60)
                tmpTransferIDs = tmpRet[0]
                transferIDs[fileType] = tmpTransferIDs
                tmpLog.debug('register dataset {0} with rule {1}'.format(tmpDS, str(tmpTransferIDs)))
            except Exception:
                errMsg = core_utils.dump_error_message(tmpLog)
                return (False, errMsg)
        else:
            # add files to the existing dataset
            try:
                tmpScope = self.scopeForTmp
                tmpDS = transferDatasets[fileType]
                rucioAPI.add_files_to_dataset(tmpScope, tmpDS, fileList, self.srcRSE)
                tmpLog.debug('added files to {0}'.format(tmpDS))
#......... the rest of the code is omitted here .........
Example 5: trigger_stage_out
# Required import: from rucio.client import Client [as alias]
# Or: from rucio.client.Client import add_replication_rule [as alias]
def trigger_stage_out(self, jobspec):
    # make logger
    tmpLog = self.make_logger(_logger, 'PandaID={0} ThreadID={1} '.format(jobspec.PandaID,
                                                                          threading.current_thread().ident),
                              method_name='trigger_stage_out')
    tmpLog.debug('executing base trigger_stage_out')
    tmpStat, tmpMsg = YodaRseDirect.trigger_stage_out(self, jobspec)
    tmpLog.debug('got {0} {1}'.format(tmpStat, tmpMsg))
    if tmpStat is not True:
        return tmpStat, tmpMsg
    # now that all output files have been copied to the local RSE, register a transient dataset
    # loop over all transfers
    tmpStat = None
    tmpMsg = ''
    srcRSE = None
    dstRSE = None
    datasetName = 'panda.harvester.{0}.{1}'.format(jobspec.PandaID, str(uuid.uuid4()))
    datasetScope = 'transient'
    # get the destination endpoint
    nucleus = jobspec.jobParams['nucleus']
    agis = self.dbInterface.get_cache('panda_queues.json').data
    dstRSE = [agis[x]["astorages"]['pr'][0] for x in agis if agis[x]["atlas_site"] == nucleus][0]
    # get the list of output files to transfer
    fileSpecs = jobspec.get_output_file_specs(skip_done=True)
    fileList = []
    lfns = []
    for fileSpec in fileSpecs:
        tmpFile = dict()
        tmpFile['scope'] = datasetScope
        tmpFile['name'] = fileSpec.lfn
        tmpFile['bytes'] = fileSpec.fsize
        tmpFile['adler32'] = fileSpec.chksum
        tmpFile['meta'] = {'guid': fileSpec.fileAttributes['guid']}
        fileList.append(tmpFile)
        lfns.append(fileSpec.lfn)
        # get the source RSE
        if srcRSE is None and fileSpec.objstoreID is not None:
            ddm = self.dbInterface.get_cache('agis_ddmendpoints.json').data
            srcRSE = [x for x in ddm if ddm[x]["id"] == fileSpec.objstoreID][0]
    # check that srcRSE and dstRSE are defined
    errStr = ''
    if srcRSE is None:
        errStr = 'source RSE is not defined '
    if dstRSE is None:
        errStr = errStr + ' destination RSE is not defined'
    if (srcRSE is None) or (dstRSE is None):
        tmpLog.error(errStr)
        return False, errStr
    # create the dataset, add files to it, and create a transfer rule
    try:
        # register the dataset
        tmpLog.debug('register {0}:{1}'.format(datasetScope, datasetName))
        rucioAPI = RucioClient()
        try:
            rucioAPI.add_dataset(datasetScope, datasetName,
                                 meta={'hidden': True},
                                 lifetime=30 * 24 * 60 * 60,
                                 rse=srcRSE)
        except DataIdentifierAlreadyExists:
            # ignore if the dataset already exists
            pass
        except Exception:
            tmpLog.error('Could not create dataset with scope: {0} Name: {1} in Rucio'
                         .format(datasetScope, datasetName))
            raise
        # add files to the dataset
        try:
            rucioAPI.add_files_to_datasets([{'scope': datasetScope,
                                             'name': datasetName,
                                             'dids': fileList,
                                             'rse': srcRSE}],
                                           ignore_duplicate=True)
        except FileAlreadyExists:
            # ignore if the files already exist
            pass
        except Exception:
            tmpLog.error('Could not add files to dataset with scope: {0} Name: {1} in Rucio'
                         .format(datasetScope, datasetName))
            raise
        # add a replication rule
        try:
            tmpDID = dict()
            tmpDID['scope'] = datasetScope
            tmpDID['name'] = datasetName
            tmpRet = rucioAPI.add_replication_rule([tmpDID], 1, dstRSE,
                                                   lifetime=30 * 24 * 60 * 60)
            ruleIDs = tmpRet[0]
            tmpLog.debug('registered dataset {0}:{1} with rule {2}'.format(datasetScope, datasetName,
                                                                           str(ruleIDs)))
            # group the output files together by the Rucio transfer rule
            jobspec.set_groups_to_files({ruleIDs: {'lfns': lfns, 'groupStatus': 'pending'}})
            msgStr = 'jobspec.set_groups_to_files - Rucio rule - {0}, lfns - {1}, groupStatus - pending'.format(ruleIDs, lfns)
            tmpLog.debug(msgStr)
#......... the rest of the code is omitted here .........
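Once add_replication_rule has returned, the rule's progress can be polled. A minimal sketch, assuming client is a rucio.client.Client instance and rule_ids is the return value of add_replication_rule; the printed keys are assumed to be among the rule attributes Rucio returns:

rule = client.get_replication_rule(rule_ids[0])
# the rule state moves from REPLICATING to OK once all locks are satisfied
print(rule['state'], rule['locks_ok_cnt'], rule['locks_replicating_cnt'])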