This page collects typical usage examples of the Python method taskbuffer.JobSpec.JobSpec.transferType. If you are wondering what JobSpec.transferType does or how to use it, the curated code examples below may help. You can also browse the containing class, taskbuffer.JobSpec.JobSpec,
for further usage examples.
Two code examples of JobSpec.transferType are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
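Before the full examples, here is a minimal, self-contained sketch of the attribute in isolation. It is not taken from the examples below: the job name and the 'direct' value are illustrative assumptions (Example 1 uses 'sub'), and it assumes the PanDA server's taskbuffer package is installed and importable.

from taskbuffer.JobSpec import JobSpec

job = JobSpec()                      # empty job specification
job.jobName = 'transferType-demo'    # hypothetical job name
job.transferType = 'direct'          # illustrative value; Example 1 below uses 'sub'
print(job.transferType)              # prints: direct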
Example 1: run
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or alternatively: from taskbuffer.JobSpec.JobSpec import transferType [as alias]
#......... some code omitted here .........
topUserDsList.append(unmergedDsName)
# update DB
retTopT = self.taskBuffer.updateDatasets([unmergedDs],withLock=True,withCriteria="status<>:crStatus",
                                         criteriaMap={':crStatus':unmergedDs.status})
if len(retTopT) > 0 and retTopT[0]==1:
    _logger.debug('%s set %s to parent dataset : %s' % (self.pandaID,unmergedDs.status,unmergedDsName))
else:
    _logger.debug('%s failed to update parent dataset : %s' % (self.pandaID,unmergedDsName))
if self.pandaDDM and self.job.prodSourceLabel=='managed':
    # instantiate SiteMapper
    if self.siteMapper == None:
        self.siteMapper = SiteMapper(self.taskBuffer)
    # get file list for PandaDDM
    retList = self.taskBuffer.queryFilesWithMap({'destinationDBlock':destinationDBlock})
    lfnsStr = ''
    guidStr = ''
    for tmpFile in retList:
        if tmpFile.type in ['log','output']:
            lfnsStr += '%s,' % tmpFile.lfn
            guidStr += '%s,' % tmpFile.GUID
    if lfnsStr != '':
        guidStr = guidStr[:-1]
        lfnsStr = lfnsStr[:-1]
        # create a DDM job
        ddmjob = JobSpec()
        ddmjob.jobDefinitionID = int(time.time()) % 10000
        ddmjob.jobName = "%s" % commands.getoutput('uuidgen')
        ddmjob.transformation = 'http://pandaserver.cern.ch:25080/trf/mover/run_dq2_cr'
        ddmjob.destinationDBlock = 'testpanda.%s' % ddmjob.jobName
        ddmjob.computingSite = "BNL_ATLAS_DDM"
        ddmjob.destinationSE = ddmjob.computingSite
        ddmjob.currentPriority = 200000
        ddmjob.prodSourceLabel = 'ddm'
        ddmjob.transferType = 'sub'
        # append log file
        fileOL = FileSpec()
        fileOL.lfn = "%s.job.log.tgz" % ddmjob.jobName
        fileOL.destinationDBlock = ddmjob.destinationDBlock
        fileOL.destinationSE = ddmjob.destinationSE
        fileOL.dataset = ddmjob.destinationDBlock
        fileOL.type = 'log'
        ddmjob.addFile(fileOL)
        # make arguments
        dstDQ2ID = 'BNLPANDA'
        srcDQ2ID = self.siteMapper.getSite(self.job.computingSite).ddm
        callBackURL = 'https://%s:%s/server/panda/datasetCompleted?vuid=%s&site=%s' % \
            (panda_config.pserverhost,panda_config.pserverport,
             dataset.vuid,dstDQ2ID)
        _logger.debug(callBackURL)
        # set src/dest
        ddmjob.sourceSite = srcDQ2ID
        ddmjob.destinationSite = dstDQ2ID
        # if src==dst, send callback without ddm job
        if dstDQ2ID == srcDQ2ID:
            comout = commands.getoutput('curl -k %s' % callBackURL)
            _logger.debug(comout)
        else:
            # run dq2_cr
            callBackURL = urllib.quote(callBackURL)
            # get destination dir
            destDir = brokerage.broker_util._getDefaultStorage(self.siteMapper.getSite(self.job.computingSite).dq2url)
            argStr = "-s %s -r %s --guids %s --lfns %s --callBack %s -d %s/%s %s" % \
                (srcDQ2ID,dstDQ2ID,guidStr,lfnsStr,callBackURL,destDir,
                 destinationDBlock,destinationDBlock)
            # set job parameters
            ddmjob.jobParameters = argStr
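As the example shows, transferType is set to 'sub' on an auxiliary DDM "mover" job: the JobSpec is filled in by hand (transformation, site, priority, prodSourceLabel='ddm'), a log FileSpec is attached with addFile, and the dq2_cr arguments are passed through jobParameters. When the source and destination DQ2 endpoints are identical, no DDM job is needed and the completion callback URL is invoked directly with curl.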
Example 2: prepare
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or alternatively: from taskbuffer.JobSpec.JobSpec import transferType [as alias]
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig):
    """Prepare the specific aspect of each subjob.
    Returns: subjobconfig list of objects understood by backends."""
    from pandatools import Client
    from pandatools import AthenaUtils
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec
    from GangaAtlas.Lib.ATLASDataset.DQ2Dataset import dq2_set_dataset_lifetime
    from GangaPanda.Lib.Panda.Panda import refreshPandaSpecs

    # make sure we have the correct siteType
    refreshPandaSpecs()

    job = app._getParent()
    masterjob = job._getRoot()

    logger.debug('ProdTransPandaRTHandler prepare called for %s',
                 job.getFQID('.'))

    job.backend.actualCE = job.backend.site
    job.backend.requirements.cloud = Client.PandaSites[job.backend.site]['cloud']

    # check that the site is in a submit-able status
    if not job.splitter or job.splitter._name != 'DQ2JobSplitter':
        allowed_sites = job.backend.list_ddm_sites()

    try:
        outDsLocation = Client.PandaSites[job.backend.site]['ddm']
        tmpDsExist = False
        if (configPanda['processingType'].startswith('gangarobot') or configPanda['processingType'].startswith('hammercloud')):
            #if Client.getDatasets(job.outputdata.datasetname):
            if getDatasets(job.outputdata.datasetname):
                tmpDsExist = True
                logger.info('Re-using output dataset %s'%job.outputdata.datasetname)
        if not configPanda['specialHandling']=='ddm:rucio' and not configPanda['processingType'].startswith('gangarobot') and not configPanda['processingType'].startswith('hammercloud') and not configPanda['processingType'].startswith('rucio_test'):
            Client.addDataset(job.outputdata.datasetname,False,location=outDsLocation,allowProdDisk=True,dsExist=tmpDsExist)
        logger.info('Output dataset %s registered at %s'%(job.outputdata.datasetname,outDsLocation))
        dq2_set_dataset_lifetime(job.outputdata.datasetname, outDsLocation)
    except exceptions.SystemExit:
        raise BackendError('Panda','Exception in adding dataset %s: %s %s'%(job.outputdata.datasetname,sys.exc_info()[0],sys.exc_info()[1]))

    # JobSpec.
    jspec = JobSpec()
    jspec.currentPriority = app.priority
    jspec.jobDefinitionID = masterjob.id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.coreCount = app.core_count
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_release
    jspec.homepackage = app.home_package
    jspec.transformation = app.transformation

    # set the transfer type (e.g. for directIO tests)
    if job.backend.requirements.transfertype != '':
        jspec.transferType = job.backend.requirements.transfertype

    jspec.destinationDBlock = job.outputdata.datasetname
    if job.outputdata.location:
        jspec.destinationSE = job.outputdata.location
    else:
        jspec.destinationSE = job.backend.site
    if job.inputdata:
        jspec.prodDBlock = job.inputdata.dataset[0]
    else:
        jspec.prodDBlock = 'NULL'
    if app.prod_source_label:
        jspec.prodSourceLabel = app.prod_source_label
    else:
        jspec.prodSourceLabel = configPanda['prodSourceLabelRun']
    jspec.processingType = configPanda['processingType']
    if job.backend.requirements.specialHandling:
        jspec.specialHandling = job.backend.requirements.specialHandling
    else:
        jspec.specialHandling = configPanda['specialHandling']
    jspec.computingSite = job.backend.site
    jspec.cloud = job.backend.requirements.cloud
    jspec.cmtConfig = app.atlas_cmtconfig

    if app.dbrelease == 'LATEST':
        try:
            latest_dbrelease = getLatestDBReleaseCaching()
        except:
            from pandatools import Client
            latest_dbrelease = Client.getLatestDBRelease()
        m = re.search('(.*):DBRelease-(.*)\.tar\.gz', latest_dbrelease)
        if m:
            self.dbrelease_dataset = m.group(1)
            self.dbrelease = m.group(2)
        else:
            raise ApplicationConfigurationError(None, "Error retrieving LATEST DBRelease. Try setting application.dbrelease manually.")
    else:
        self.dbrelease_dataset = app.dbrelease_dataset
        self.dbrelease = app.dbrelease

    jspec.jobParameters = app.job_parameters
    if self.dbrelease:
        if self.dbrelease == 'current':
            jspec.jobParameters += ' --DBRelease=current'
        else:
            if jspec.transformation.endswith("_tf.py") or jspec.transformation.endswith("_tf"):
                jspec.jobParameters += ' --DBRelease=DBRelease-%s.tar.gz' % (self.dbrelease,)
#......... some code omitted here .........
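Note that this handler assigns jspec.transferType only when job.backend.requirements.transfertype is non-empty (per the inline comment, e.g. for directIO tests); otherwise the attribute is left untouched, presumably deferring to the PanDA server's default transfer mode.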