This page collects typical usage examples of the Python method taskbuffer.JobSpec.JobSpec.sourceSite. If you are wondering what exactly JobSpec.sourceSite does, how to call it, or what real-world usage looks like, the curated example below should help. You can also explore further usage examples of the containing class, taskbuffer.JobSpec.JobSpec.
This page shows 1 code example of the JobSpec.sourceSite method; examples are ordered by popularity by default.
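Before the full example, a minimal sketch of the attribute in isolation may help: sourceSite is set and read like any other JobSpec attribute. This sketch assumes the panda-server taskbuffer package is importable; the site names are placeholders, not real DQ2 endpoints.

# Minimal sketch (Python 2, matching the example below); site IDs are placeholders
from taskbuffer.JobSpec import JobSpec

job = JobSpec()
job.sourceSite = 'SOURCE_DQ2_SITE'            # placeholder source site ID
job.destinationSite = 'DESTINATION_DQ2_SITE'  # placeholder destination site ID
print job.sourceSite                          # -> SOURCE_DQ2_SITE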
Example 1: run
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import sourceSite [as alias]
#......... part of the code is omitted here .........
            if tmpFile.type in ['log','output']:
                lfnsStr += '%s,' % tmpFile.lfn
                guidStr += '%s,' % tmpFile.GUID
        if lfnsStr != '':
            guidStr = guidStr[:-1]
            lfnsStr = lfnsStr[:-1]
            # create a DDM job
            ddmjob = JobSpec()
            ddmjob.jobDefinitionID = int(time.time()) % 10000
            ddmjob.jobName = "%s" % commands.getoutput('uuidgen')
            ddmjob.transformation = 'http://pandaserver.cern.ch:25080/trf/mover/run_dq2_cr'
            ddmjob.destinationDBlock = 'testpanda.%s' % ddmjob.jobName
            ddmjob.computingSite = "BNL_ATLAS_DDM"
            ddmjob.destinationSE = ddmjob.computingSite
            ddmjob.currentPriority = 200000
            ddmjob.prodSourceLabel = 'ddm'
            ddmjob.transferType = 'sub'
            # append log file
            fileOL = FileSpec()
            fileOL.lfn = "%s.job.log.tgz" % ddmjob.jobName
            fileOL.destinationDBlock = ddmjob.destinationDBlock
            fileOL.destinationSE = ddmjob.destinationSE
            fileOL.dataset = ddmjob.destinationDBlock
            fileOL.type = 'log'
            ddmjob.addFile(fileOL)
            # make arguments
            dstDQ2ID = 'BNLPANDA'
            srcDQ2ID = self.siteMapper.getSite(self.job.computingSite).ddm
            callBackURL = 'https://%s:%s/server/panda/datasetCompleted?vuid=%s&site=%s' % \
                (panda_config.pserverhost, panda_config.pserverport,
                 dataset.vuid, dstDQ2ID)
            _logger.debug(callBackURL)
            # set src/dest
            ddmjob.sourceSite = srcDQ2ID
            ddmjob.destinationSite = dstDQ2ID
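            # note: sourceSite/destinationSite (the attributes this page is
            # about) record the source and destination DQ2 site IDs on the job
            # itself; the same IDs are also passed explicitly through the
            # -s/-r arguments assembled below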
            # if src==dst, send callback without ddm job
            if dstDQ2ID == srcDQ2ID:
                comout = commands.getoutput('curl -k %s' % callBackURL)
                _logger.debug(comout)
            else:
                # run dq2_cr
                callBackURL = urllib.quote(callBackURL)
                # get destination dir
                destDir = brokerage.broker_util._getDefaultStorage(self.siteMapper.getSite(self.job.computingSite).dq2url)
                # destinationDBlock below is a local variable defined in the
                # omitted part of the method (presumably the name of the
                # dataset being processed)
                argStr = "-s %s -r %s --guids %s --lfns %s --callBack %s -d %s/%s %s" % \
                    (srcDQ2ID, dstDQ2ID, guidStr, lfnsStr, callBackURL, destDir,
                     destinationDBlock, destinationDBlock)
                # set job parameters
                ddmjob.jobParameters = argStr
                _logger.debug('%s pdq2_cr %s' % (self.pandaID, ddmjob.jobParameters))
                ddmJobs.append(ddmjob)
        # start Activator
        if re.search('_sub\d+$', dataset.name) == None:
            if self.job.prodSourceLabel == 'panda' and self.job.processingType in ['merge','unmerge']:
                # don't trigger Activator for merge jobs
                pass
            else:
                if self.job.jobStatus == 'finished':
                    aThr = Activator(self.taskBuffer, dataset)
                    aThr.start()
                    aThr.join()
    else:
        # unset flag since another thread already updated
        #flagComplete = False
        pass
else:
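The listing is cut off by the example extractor at the final else:, whose body belongs to an outer branch of the method that is not shown. As a supplement, here is a small self-contained sketch, in the same Python 2 style as the example, of the callback shortcut used above: building the datasetCompleted callback URL and percent-escaping it so it survives as a single command-line token. The host, port, and vuid values are placeholders, not real endpoints.

import urllib

# placeholders standing in for panda_config.pserverhost/pserverport and
# dataset.vuid from the example above
pserverhost = 'pandaserver.example.org'
pserverport = 25443
vuid = '00000000-0000-0000-0000-000000000000'
dstDQ2ID = 'BNLPANDA'

callBackURL = 'https://%s:%s/server/panda/datasetCompleted?vuid=%s&site=%s' % \
    (pserverhost, pserverport, vuid, dstDQ2ID)
# quote the URL so it can be handed to run_dq2_cr as one --callBack argument
print urllib.quote(callBackURL)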