This article collects typical usage examples of the Python method taskbuffer.JobSpec.JobSpec.taskID. If you are wondering what JobSpec.taskID does, how to use it, or want concrete examples, the curated code samples below may help. You can also look further into the containing class, taskbuffer.JobSpec.JobSpec, for related usage.
The following presents 5 code examples of the JobSpec.taskID method, sorted by popularity by default.
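Before the full examples, a minimal sketch of the attribute in isolation may be useful. It assumes a PanDA server checkout where the taskbuffer package is importable; the taskID value is purely illustrative.
from taskbuffer.JobSpec import JobSpec

job = JobSpec()           # all attributes start as None
job.taskID = 123          # plain attribute: ties this job to task 123
assert job.taskID == 123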
Example 1: range
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import taskID [as alias]
for i in range(1):
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),i)
# job.AtlasRelease = 'Atlas-12.0.6'
# job.homepackage = 'AtlasProduction/12.0.6.5'
job.AtlasRelease = 'Atlas-12.0.7'
job.homepackage = 'AtlasProduction/12.0.7.1'
job.transformation = 'csc_evgen_trf.py'
job.destinationDBlock = datasetName
# job.destinationSE = destName
# job.cloud = 'CA'
job.cloud = cloud
job.taskID = taskid
job.currentPriority = 1000
job.prodSourceLabel = 'test'
# job.prodSourceLabel = 'cloudtest'
job.computingSite = site
file = FileSpec()
file.lfn = "%s.evgen.pool.root" % job.jobName
file.destinationDBlock = job.destinationDBlock
file.destinationSE = job.destinationSE
file.dataset = job.destinationDBlock
file.type = 'output'
job.addFile(file)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
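Example 1 is cut off while the log FileSpec is being built. In similar PanDA test scripts the block typically finishes along these lines; this is a hedged sketch (jobList and the submission call are assumptions based on the usual pattern, not the elided original):
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
jobList.append(job)       # jobList would be initialized before the loop

# after the loop, jobs are usually submitted through the PanDA client:
# import userinterface.Client as Client
# s, o = Client.submitJobs(jobList)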
Example 2: doBrokerage
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import taskID [as alias]
def doBrokerage(self,inputList,vo,prodSourceLabel,workQueue):
# list with a lock
inputListWorld = ListWithLock([])
# variables for submission
maxBunchTask = 100
# make logger
tmpLog = MsgWrapper(logger)
tmpLog.debug('start doBrokerage')
# return for failure
retFatal = self.SC_FATAL
retTmpError = self.SC_FAILED
tmpLog.debug('vo={0} label={1} queue={2} nTasks={3}'.format(vo,prodSourceLabel,
workQueue.queue_name,
len(inputList)))
# loop over all tasks
allRwMap = {}
prioMap = {}
tt2Map = {}
expRWs = {}
jobSpecList = []
for tmpJediTaskID,tmpInputList in inputList:
for taskSpec,cloudName,inputChunk in tmpInputList:
# collect tasks for WORLD
if taskSpec.useWorldCloud():
inputListWorld.append((taskSpec,inputChunk))
continue
# make JobSpec to be submitted for TaskAssigner
jobSpec = JobSpec()
jobSpec.taskID = taskSpec.jediTaskID
jobSpec.jediTaskID = taskSpec.jediTaskID
# set managed to trigger TA
jobSpec.prodSourceLabel = 'managed'
jobSpec.processingType = taskSpec.processingType
jobSpec.workingGroup = taskSpec.workingGroup
jobSpec.metadata = taskSpec.processingType
jobSpec.assignedPriority = taskSpec.taskPriority
jobSpec.currentPriority = taskSpec.currentPriority
jobSpec.maxDiskCount = (taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize()) / 1024 / 1024
if taskSpec.useWorldCloud():
# use destinationSE to trigger task brokerage in WORLD cloud
jobSpec.destinationSE = taskSpec.cloud
prodDBlock = None
setProdDBlock = False
for datasetSpec in inputChunk.getDatasets():
prodDBlock = datasetSpec.datasetName
if datasetSpec.isMaster():
jobSpec.prodDBlock = datasetSpec.datasetName
setProdDBlock = True
for fileSpec in datasetSpec.Files:
tmpInFileSpec = fileSpec.convertToJobFileSpec(datasetSpec)
jobSpec.addFile(tmpInFileSpec)
# use secondary dataset name as prodDBlock
if setProdDBlock == False and prodDBlock != None:
jobSpec.prodDBlock = prodDBlock
# append
jobSpecList.append(jobSpec)
prioMap[jobSpec.taskID] = jobSpec.currentPriority
tt2Map[jobSpec.taskID] = jobSpec.processingType
# get RW for a priority
if not allRwMap.has_key(jobSpec.currentPriority):
tmpRW = self.taskBufferIF.calculateRWwithPrio_JEDI(vo,prodSourceLabel,workQueue,
jobSpec.currentPriority)
if tmpRW == None:
tmpLog.error('failed to calculate RW with prio={0}'.format(jobSpec.currentPriority))
return retTmpError
allRwMap[jobSpec.currentPriority] = tmpRW
# get expected RW
expRW = self.taskBufferIF.calculateTaskRW_JEDI(jobSpec.jediTaskID)
if expRW == None:
tmpLog.error('failed to calculate RW for jediTaskID={0}'.format(jobSpec.jediTaskID))
return retTmpError
expRWs[jobSpec.taskID] = expRW
# for old clouds
if jobSpecList != []:
# get fullRWs
fullRWs = self.taskBufferIF.calculateRWwithPrio_JEDI(vo,prodSourceLabel,None,None)
if fullRWs == None:
tmpLog.error('failed to calculate full RW')
return retTmpError
# set metadata
for jobSpec in jobSpecList:
rwValues = allRwMap[jobSpec.currentPriority]
jobSpec.metadata = "%s;%s;%s;%s;%s;%s" % (jobSpec.metadata,
str(rwValues),str(expRWs),
str(prioMap),str(fullRWs),
str(tt2Map))
tmpLog.debug('run task assigner for {0} tasks'.format(len(jobSpecList)))
nBunchTask = 0
while nBunchTask < len(jobSpecList):
# get a bunch
jobsBunch = jobSpecList[nBunchTask:nBunchTask+maxBunchTask]
strIDs = 'jediTaskID='
for tmpJobSpec in jobsBunch:
strIDs += '{0},'.format(tmpJobSpec.taskID)
strIDs = strIDs[:-1]
tmpLog.debug(strIDs)
# increment index
nBunchTask += maxBunchTask
# run task brokerage
stS,outSs = PandaClient.runTaskAssignment(jobsBunch)
#......... remainder of the code omitted .........
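Note that Example 2 is elided at this point; Example 4 below is a near-identical, complete version of the same doBrokerage method, including the final task-assignment call and return value.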
Example 3: str
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import taskID [as alias]
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),i)
job.AtlasRelease = 'Atlas-14.1.0'
job.homepackage = 'AtlasProduction/12.0.6.2'
job.transformation = 'csc_evgen_trf.py'
job.destinationDBlock = datasetName
#job.destinationSE = destName
job.currentPriority = 1000
job.prodSourceLabel = 'managed'
#job.prodSourceLabel = 'test'
#job.computingSite = site
job.cmtConfig = 'i686-slc4-gcc34-opt'
job.metadata = 'evgen;%s;%s;%s' % (str({'FR': 46, 'NL': 45, 'NDGF': 300, 'CERN': 19, 'TW': 44110, 'CA': 2922, 'DE': 9903, 'IT': 1168, 'US': 6226, 'UK': 1026, 'ES': 26619}),str({999907:100,999906:200,999905:300}),str({999905:100,999906:910,999907:500}))
#job.metadata = 'evgen;%s' % str({'FR': 46, 'NL': 45, 'NDGF': 300, 'CERN': 19, 'TW': 44110, 'CA': 2922, 'DE': 9903, 'IT': 1168, 'US': 6226, 'UK': 1026, 'ES': 26619})
#job.cloud = "UK"
job.taskID = i
file = FileSpec()
file.lfn = "%s.evgen.pool.root" % job.jobName
file.destinationDBlock = job.destinationDBlock
file.destinationSE = job.destinationSE
file.dataset = job.destinationDBlock
#file.destinationDBlockToken = 'ATLASDATADISK'
file.type = 'output'
job.addFile(file)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
Example 4: doBrokerage
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import taskID [as alias]
def doBrokerage(self, inputList, vo, prodSourceLabel, workQueue):
# variables for submission
maxBunchTask = 100
# make logger
tmpLog = MsgWrapper(logger)
tmpLog.debug("start doBrokerage")
# return for failure
retFatal = self.SC_FATAL
retTmpError = self.SC_FAILED
tmpLog.debug("vo={0} label={1} queue={2}".format(vo, prodSourceLabel, workQueue.queue_name))
# loop over all tasks
allRwMap = {}
prioMap = {}
tt2Map = {}
expRWs = {}
jobSpecList = []
for tmpJediTaskID, tmpInputList in inputList:
for taskSpec, cloudName, inputChunk in tmpInputList:
# make JobSpec to be submitted for TaskAssigner
jobSpec = JobSpec()
jobSpec.taskID = taskSpec.jediTaskID
jobSpec.jediTaskID = taskSpec.jediTaskID
# set managed to trigger TA
jobSpec.prodSourceLabel = "managed"
jobSpec.processingType = taskSpec.processingType
jobSpec.workingGroup = taskSpec.workingGroup
jobSpec.metadata = taskSpec.processingType
jobSpec.assignedPriority = taskSpec.taskPriority
jobSpec.currentPriority = taskSpec.currentPriority
jobSpec.maxDiskCount = (taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize()) / 1024 / 1024
if taskSpec.useWorldCloud():
# use destinationSE to trigger task brokerage in WORLD cloud
jobSpec.destinationSE = taskSpec.cloud
prodDBlock = None
setProdDBlock = False
for datasetSpec in inputChunk.getDatasets():
prodDBlock = datasetSpec.datasetName
if datasetSpec.isMaster():
jobSpec.prodDBlock = datasetSpec.datasetName
setProdDBlock = True
for fileSpec in datasetSpec.Files:
tmpInFileSpec = fileSpec.convertToJobFileSpec(datasetSpec)
jobSpec.addFile(tmpInFileSpec)
# use secondary dataset name as prodDBlock
if setProdDBlock == False and prodDBlock != None:
jobSpec.prodDBlock = prodDBlock
# append
jobSpecList.append(jobSpec)
prioMap[jobSpec.taskID] = jobSpec.currentPriority
tt2Map[jobSpec.taskID] = jobSpec.processingType
# get RW for a priority
if not allRwMap.has_key(jobSpec.currentPriority):
tmpRW = self.taskBufferIF.calculateRWwithPrio_JEDI(
vo, prodSourceLabel, workQueue, jobSpec.currentPriority
)
if tmpRW == None:
tmpLog.error("failed to calculate RW with prio={0}".format(jobSpec.currentPriority))
return retTmpError
allRwMap[jobSpec.currentPriority] = tmpRW
# get expected RW
expRW = self.taskBufferIF.calculateTaskRW_JEDI(jobSpec.jediTaskID)
if expRW == None:
tmpLog.error("failed to calculate RW for jediTaskID={0}".format(jobSpec.jediTaskID))
return retTmpError
expRWs[jobSpec.taskID] = expRW
# get fullRWs
fullRWs = self.taskBufferIF.calculateRWwithPrio_JEDI(vo, prodSourceLabel, None, None)
if fullRWs == None:
tmpLog.error("failed to calculate full RW")
return retTmpError
# set metadata
for jobSpec in jobSpecList:
rwValues = allRwMap[jobSpec.currentPriority]
jobSpec.metadata = "%s;%s;%s;%s;%s;%s" % (
jobSpec.metadata,
str(rwValues),
str(expRWs),
str(prioMap),
str(fullRWs),
str(tt2Map),
)
tmpLog.debug("run task assigner for {0} tasks".format(len(jobSpecList)))
nBunchTask = 0
while nBunchTask < len(jobSpecList):
# get a bunch
jobsBunch = jobSpecList[nBunchTask : nBunchTask + maxBunchTask]
strIDs = "jediTaskID="
for tmpJobSpec in jobsBunch:
strIDs += "{0},".format(tmpJobSpec.taskID)
strIDs = strIDs[:-1]
tmpLog.debug(strIDs)
# increment index
nBunchTask += maxBunchTask
# run task brokerage
stS, outSs = PandaClient.runTaskAssignment(jobsBunch)
tmpLog.debug("{0}:{1}".format(stS, str(outSs)))
# return
tmpLog.debug("done")
return self.SC_SUCCEEDED
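The maxBunchTask loop in doBrokerage processes tasks in fixed-size slices. The same slicing pattern in isolation (generic Python with illustrative names, not PanDA code):
def iter_bunches(items, bunch_size):
    # yield consecutive slices of at most bunch_size elements
    for start in range(0, len(items), bunch_size):
        yield items[start:start + bunch_size]

for bunch in iter_bunches(list(range(250)), 100):
    # each bunch would be handed to PandaClient.runTaskAssignment
    assert len(bunch) <= 100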
Example 5: int
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import taskID [as alias]
jobDefinitionID = int(time.time()) % 10000
job = JobSpec()
job.jobDefinitionID = jobDefinitionID
job.jobName = jobName
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf.sh'
job.destinationDBlock = datasetName
job.destinationSE = 'local'
job.currentPriority = 1000
job.prodSourceLabel = 'panda'
job.jobParameters = ' --lsstJobParams="%s" ' % lsstJobParams
if prodUserName is not None:
job.prodUserName = prodUserName
else:
job.prodUserName = prodUserNameDefault
if PIPELINE_PROCESSINSTANCE is not None:
job.taskID = PIPELINE_PROCESSINSTANCE
if PIPELINE_EXECUTIONNUMBER is not None:
job.attemptNr = PIPELINE_EXECUTIONNUMBER
if PIPELINE_TASK is not None:
job.processingType = PIPELINE_TASK
job.computingSite = site
job.VO = "lsst"
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
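In Example 5, PIPELINE_PROCESSINSTANCE, PIPELINE_EXECUTIONNUMBER, and PIPELINE_TASK are defined outside the snippet; in a pipeline wrapper such values would plausibly come from environment variables. A hedged sketch of that setup (the environment key names and the int conversion are assumptions):
import os

PIPELINE_PROCESSINSTANCE = os.environ.get('PIPELINE_PROCESSINSTANCE')
PIPELINE_EXECUTIONNUMBER = os.environ.get('PIPELINE_EXECUTIONNUMBER')
PIPELINE_TASK = os.environ.get('PIPELINE_TASK')

if PIPELINE_PROCESSINSTANCE is not None:
    job.taskID = int(PIPELINE_PROCESSINSTANCE)   # taskID is numeric in PanDA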