This page collects typical usage examples of the Python method taskbuffer.JobSpec.JobSpec.maxCpuCount. If you are wondering what JobSpec.maxCpuCount does, how it is called, or what working uses of it look like, the selected code examples below should help. You can also look further into the class that defines the method, taskbuffer.JobSpec.JobSpec.
Two code examples of JobSpec.maxCpuCount are shown below, ordered by popularity by default.
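Before the full examples, here is a minimal sketch of the common pattern they illustrate: maxCpuCount is only set on a JobSpec when a CPU-time limit has actually been requested, and is left untouched otherwise. The build_jobspec helper and its cputime_seconds parameter are hypothetical and exist only for this illustration; they do not appear in the examples below.

from taskbuffer.JobSpec import JobSpec

def build_jobspec(cputime_seconds=None):
    # hypothetical helper, for illustration only
    jspec = JobSpec()
    jspec.jobName = 'example-job'
    # mirror the guard used in Example 1: only set maxCpuCount when a
    # CPU-time limit was requested (None / -1 meaning "no limit")
    if cputime_seconds not in (None, -1):
        jspec.maxCpuCount = cputime_seconds
    return jspec

jspec = build_jobspec(cputime_seconds=3600)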
Example 1: prepare
# Required module: from taskbuffer.JobSpec import JobSpec
# Method used below: JobSpec.maxCpuCount
def prepare(self,app,appsubconfig,appmasterconfig,jobmasterconfig):
    '''prepare the subjob specific configuration'''
    from pandatools import Client
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec

    job = app._getParent()
    logger.debug('AthenaPandaRTHandler prepare called for %s', job.getFQID('.'))

    # in case of a simple job get the dataset content, otherwise subjobs are filled by the splitter
    if job.inputdata and not job._getRoot().subjobs:
        if not job.inputdata.names:
            contents = job.inputdata.get_contents(overlap=False, size=True)
            for ds in contents.keys():
                for f in contents[ds]:
                    job.inputdata.guids.append( f[0] )
                    job.inputdata.names.append( f[1][0] )
                    job.inputdata.sizes.append( f[1][1] )
                    job.inputdata.checksums.append( f[1][2] )
                    job.inputdata.scopes.append( f[1][3] )

    site = job._getRoot().backend.site
    job.backend.site = site
    job.backend.actualCE = site
    cloud = job._getRoot().backend.requirements.cloud
    job.backend.requirements.cloud = cloud

    # if no outputdata are given
    if not job.outputdata:
        job.outputdata = DQ2OutputDataset()
        job.outputdata.datasetname = job._getRoot().outputdata.datasetname
    #if not job.outputdata.datasetname:
    else:
        job.outputdata.datasetname = job._getRoot().outputdata.datasetname
    if not job.outputdata.datasetname:
        raise ApplicationConfigurationError('DQ2OutputDataset has no datasetname')

    jspec = JobSpec()
    jspec.jobDefinitionID = job._getRoot().id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.transformation = '%s/runGen-00-00-02' % Client.baseURLSUB
    if job.inputdata:
        jspec.prodDBlock = job.inputdata.dataset[0]
    else:
        jspec.prodDBlock = 'NULL'
    jspec.destinationDBlock = job.outputdata.datasetname
    if job.outputdata.location:
        if not job._getRoot().subjobs or job.id == 0:
            logger.warning('You have specified outputdata.location. Note that Panda may not support writing to a user-defined output location.')
        jspec.destinationSE = job.outputdata.location
    else:
        jspec.destinationSE = site
    jspec.prodSourceLabel = configPanda['prodSourceLabelRun']
    jspec.processingType = configPanda['processingType']
    jspec.assignedPriority = configPanda['assignedPriorityRun']
    jspec.cloud = cloud
    # memory
    if job.backend.requirements.memory != -1:
        jspec.minRamCount = job.backend.requirements.memory
    # cputime
    if job.backend.requirements.cputime != -1:
        jspec.maxCpuCount = job.backend.requirements.cputime
    jspec.computingSite = site

    # library (source files)
    if job.backend.libds:
        flib = FileSpec()
        flib.lfn = self.fileBO.lfn
        flib.GUID = self.fileBO.GUID
        flib.type = 'input'
        flib.status = self.fileBO.status
        flib.dataset = self.fileBO.destinationDBlock
        flib.dispatchDBlock = self.fileBO.destinationDBlock
        jspec.addFile(flib)
    elif job.backend.bexec:
        flib = FileSpec()
        flib.lfn = self.library
        flib.type = 'input'
        flib.dataset = self.libDataset
        flib.dispatchDBlock = self.libDataset
        jspec.addFile(flib)

    # input files FIXME: many more input types
    if job.inputdata:
        for guid, lfn, size, checksum, scope in zip(job.inputdata.guids,job.inputdata.names,job.inputdata.sizes, job.inputdata.checksums, job.inputdata.scopes):
            finp = FileSpec()
            finp.lfn = lfn
            finp.GUID = guid
            finp.scope = scope
            # finp.fsize =
            # finp.md5sum =
            finp.dataset = job.inputdata.dataset[0]
#......... the rest of the code is omitted .........
Example 2: run
# Required module: from taskbuffer.JobSpec import JobSpec
# Method used below: JobSpec.maxCpuCount
#......... the code before this excerpt is omitted .........
                if not tmpSiteID.startswith('ANALY_'):
                    continue
                # remove test and local
                if re.search('_test',tmpSiteID,re.I) != None:
                    continue
                if re.search('_local',tmpSiteID,re.I) != None:
                    continue
                # avoid same site
                if self.avoidSameSite and self.getAggName(tmpSiteSpec.ddm) == origSiteDDM:
                    continue
                # check DQ2 ID
                if self.cloud in [None,tmpSiteSpec.cloud] \
                       and (self.getAggName(tmpSiteSpec.ddm) in maxDQ2Sites or inputDS == []):
                    # excluded sites
                    excludedFlag = False
                    for tmpExcSite in self.excludedSite:
                        if re.search(tmpExcSite,tmpSiteID) != None:
                            excludedFlag = True
                            break
                    if excludedFlag:
                        _logger.debug("%s skip %s since excluded" % (self.token,tmpSiteID))
                        continue
                    # use online only
                    if tmpSiteSpec.status != 'online':
                        _logger.debug("%s skip %s status=%s" % (self.token,tmpSiteID,tmpSiteSpec.status))
                        continue
                    # check maxinputsize
                    if (maxFileSize == None and origMaxInputSize > siteMapper.getSite(tmpSiteID).maxinputsize) or \
                           maxFileSize > siteMapper.getSite(tmpSiteID).maxinputsize:
                        _logger.debug("%s skip %s due to maxinputsize" % (self.token,tmpSiteID))
                        continue
                    # append
                    if not tmpSiteID in maxPandaSites:
                        maxPandaSites.append(tmpSiteID)
            # choose at most 20 sites randomly to avoid too many lookup
            random.shuffle(maxPandaSites)
            maxPandaSites = maxPandaSites[:20]
            _logger.debug("%s candidate PandaSites -> %s" % (self.token,str(maxPandaSites)))
            # no Panda siteIDs
            if maxPandaSites == []:
                _logger.debug("%s no Panda site candidate" % self.token)
            else:
                # set AtlasRelease and cmtConfig to dummy job
                tmpJobForBrokerage = JobSpec()
                if self.job.AtlasRelease in ['NULL',None]:
                    tmpJobForBrokerage.AtlasRelease = ''
                else:
                    tmpJobForBrokerage.AtlasRelease = self.job.AtlasRelease
                # use nightlies
                matchNight = re.search('^AnalysisTransforms-.*_(rel_\d+)$',self.job.homepackage)
                if matchNight != None:
                    tmpJobForBrokerage.AtlasRelease += ':%s' % matchNight.group(1)
                # use cache
                else:
                    matchCache = re.search('^AnalysisTransforms-([^/]+)',self.job.homepackage)
                    if matchCache != None:
                        tmpJobForBrokerage.AtlasRelease = matchCache.group(1).replace('_','-')
                if not self.job.cmtConfig in ['NULL',None]:
                    tmpJobForBrokerage.cmtConfig = self.job.cmtConfig
                # memory size
                if not self.job.minRamCount in ['NULL',None,0]:
                    tmpJobForBrokerage.minRamCount = self.job.minRamCount
                # CPU count
                if not self.job.maxCpuCount in ['NULL',None,0]:
                    tmpJobForBrokerage.maxCpuCount = self.job.maxCpuCount
                # run brokerage
                brokerage.broker.schedule([tmpJobForBrokerage],self.taskBuffer,siteMapper,forAnalysis=True,
                                          setScanSiteList=maxPandaSites,trustIS=True,reportLog=True)
                newSiteID = tmpJobForBrokerage.computingSite
                self.brokerageInfo += tmpJobForBrokerage.brokerageErrorDiag
                _logger.debug("%s runBrokerage - > %s" % (self.token,newSiteID))
                # unknown site
                if not siteMapper.checkSite(newSiteID):
                    _logger.error("%s unknown site" % self.token)
                    _logger.debug("%s failed" % self.token)
                    return
                # get new site spec
                newSiteSpec = siteMapper.getSite(newSiteID)
                # avoid repetition
                if self.getAggName(newSiteSpec.ddm) == origSiteDDM:
                    _logger.debug("%s assigned to the same site %s " % (self.token,newSiteID))
                    _logger.debug("%s end" % self.token)
                    return
                # simulation mode
                if self.simulation:
                    _logger.debug("%s end simulation" % self.token)
                    return
                # prepare jobs
                status = self.prepareJob(newSiteID,newSiteSpec)
                if status:
                    # run SetUpper
                    statusSetUp = self.runSetUpper()
                    if not statusSetUp:
                        _logger.debug("%s runSetUpper failed" % self.token)
                    else:
                        _logger.debug("%s successfully assigned to %s" % (self.token,newSiteID))
                _logger.debug("%s end" % self.token)
        except:
            errType,errValue,errTraceBack = sys.exc_info()
            _logger.error("%s run() : %s %s" % (self.token,errType,errValue))