This article collects typical usage examples of the Python method pandajedi.jedicore.MsgWrapper.MsgWrapper.debug. If you are wondering what MsgWrapper.debug does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the class the method belongs to, pandajedi.jedicore.MsgWrapper.MsgWrapper.
The following 15 code examples show the MsgWrapper.debug method in use; they are sorted by popularity by default.
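All of the examples share the same basic pattern: wrap an existing logger in MsgWrapper, optionally with a per-task prefix, then call debug/error on the wrapper. The minimal sketch below illustrates that pattern under stated assumptions; the logging.getLogger setup and the jediTaskID value are placeholders for illustration only (the JEDI code obtains its logger from its own PandaLogger utilities), not taken from the examples themselves.

import logging
from pandajedi.jedicore.MsgWrapper import MsgWrapper

# assumed stand-in for the module-level 'logger' used in the examples below
logger = logging.getLogger('Example')

# plain wrapper, as in the doAction() examples
tmpLog = MsgWrapper(logger)
tmpLog.debug('start')

# wrapper with a per-task prefix, as in the per-task loops (the jediTaskID is a made-up value)
taskLog = MsgWrapper(logger, '<jediTaskID=12345>')
taskLog.debug('start to reassign')
taskLog.error('failed to update CloudTasks')

tmpLog.debug('done')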
Example 1: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    try:
        # get logger
        tmpLog = MsgWrapper(logger)
        tmpLog.debug('start')
        # action for priority boost
        self.doActionForPriorityBoost(tmpLog)
        # action for reassign
        self.doActionForReassgin(tmpLog)
        # action for throttled
        self.doActionForThrottled(tmpLog)
        # action for high prio pending
        for minPriority,timeoutVal in [(950,10),
                                       (900,30),
                                       ]:
            self.doActionForHighPrioPending(tmpLog,minPriority,timeoutVal)
        # action to set scout job data w/o scouts
        self.doActionToSetScoutJobData(tmpLog)
        # action to throttle jobs in paused tasks
        self.doActionToThrottleJobInPausedTasks(tmpLog)
        # action for jumbo
        jumbo = JumboWatchDog(self.taskBufferIF, self.ddmIF, tmpLog, 'atlas', 'managed')
        jumbo.run()
    except:
        errtype,errvalue = sys.exc_info()[:2]
        tmpLog.error('failed with {0}:{1} {2}'.format(errtype.__name__,errvalue,
                                                      traceback.format_exc()))
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 2: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    # get logger
    tmpLog = MsgWrapper(logger)
    tmpLog.debug('start')
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 3: doActionForReassgin
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doActionForReassgin(self,gTmpLog):
    # get DDM I/F
    ddmIF = self.ddmIF.getInterface(self.vo)
    # get site mapper
    siteMapper = self.taskBufferIF.getSiteMapper()
    # get tasks to get reassigned
    taskList = self.taskBufferIF.getTasksToReassign_JEDI(self.vo,self.prodSourceLabel)
    gTmpLog.debug('got {0} tasks to reassign'.format(len(taskList)))
    for taskSpec in taskList:
        tmpLog = MsgWrapper(logger,'<jediTaskID={0}'.format(taskSpec.jediTaskID))
        tmpLog.debug('start to reassign')
        # DDM backend
        ddmBackEnd = taskSpec.getDdmBackEnd()
        # update cloudtasks
        tmpStat = self.taskBufferIF.setCloudTaskByUser('jedi',taskSpec.jediTaskID,taskSpec.cloud,'assigned',True)
        if tmpStat != 'SUCCEEDED':
            tmpLog.error('failed to update CloudTasks')
            continue
        # get datasets
        tmpStat,datasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,['output','log'])
        if tmpStat != True:
            tmpLog.error('failed to get datasets')
            continue
        # check cloud
        if not siteMapper.checkCloud(taskSpec.cloud):
            tmpLog.error("cloud={0} doesn't exist".format(taskSpec.cloud))
            continue
        # get T1
        t1SiteName = siteMapper.getCloud(taskSpec.cloud)['dest']
        t1Site = siteMapper.getSite(t1SiteName)
        # loop over all datasets
        isOK = True
        for datasetSpec in datasetSpecList:
            tmpLog.debug('dataset={0}'.format(datasetSpec.datasetName))
            # get location
            location = siteMapper.getDdmEndpoint(t1Site.sitename,datasetSpec.storageToken)
            # set origin metadata
            tmpLog.debug('setting metadata origin={0}'.format(location))
            tmpStat = ddmIF.setDatasetMetadata(datasetSpec.datasetName,'origin',location)
            if tmpStat != True:
                tmpLog.error("failed to set origin")
                isOK = False
                break
            # make subscription
            tmpLog.debug('registering subscription to {0} with backend={1}'.format(location,
                                                                                   ddmBackEnd))
            tmpStat = ddmIF.registerDatasetSubscription(datasetSpec.datasetName,location,
                                                        activity='Production',ignoreUnknown=True,
                                                        backEnd=ddmBackEnd)
            if tmpStat != True:
                tmpLog.error("failed to make subscription")
                isOK = False
                break
        # succeeded
        if isOK:
            # activate task
            taskSpec.status = taskSpec.oldStatus
            taskSpec.oldStatus = None
            self.taskBufferIF.updateTask_JEDI(taskSpec,{'jediTaskID':taskSpec.jediTaskID})
            tmpLog.debug('finished to reassign')
Example 4: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    try:
        # get logger
        tmpLog = MsgWrapper(logger)
        tmpLog.debug('start')
        # action for priority boost
        self.doActionForPriorityBoost(tmpLog)
        # action for reassign
        self.doActionForReassgin(tmpLog)
        # action for throttled
        self.doActionForThrottled(tmpLog)
        # action for high prio pending
        for minPriority,timeoutVal in [(950,10),
                                       (900,30),
                                       ]:
            self.doActionForHighPrioPending(tmpLog,minPriority,timeoutVal)
        # action to set scout job data w/o scouts
        self.doActionToSetScoutJobData(tmpLog)
    except:
        errtype,errvalue = sys.exc_info()[:2]
        tmpLog.error('failed with {0}:{1} {2}'.format(errtype.__name__,errvalue,
                                                      traceback.format_exc()))
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 5: doCheck
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doCheck(self,taskSpecList):
    # make logger
    tmpLog = MsgWrapper(logger)
    tmpLog.debug('start doCheck')
    # return for failure
    retFatal = self.SC_FATAL,{}
    retTmpError = self.SC_FAILED,{}
    # get list of jediTaskIDs
    taskIdList = []
    taskSpecMap = {}
    for taskSpec in taskSpecList:
        taskIdList.append(taskSpec.jediTaskID)
        taskSpecMap[taskSpec.jediTaskID] = taskSpec
    # check with panda
    tmpLog.debug('check with panda')
    tmpPandaStatus,cloudsInPanda = PandaClient.seeCloudTask(taskIdList)
    if tmpPandaStatus != 0:
        tmpLog.error('failed to see clouds')
        return retTmpError
    # make return map
    retMap = {}
    for tmpTaskID,tmpCoreName in cloudsInPanda.iteritems():
        tmpLog.debug('jediTaskID={0} -> {1}'.format(tmpTaskID,tmpCoreName))
        if not tmpCoreName in ['NULL','',None]:
            taskSpec = taskSpecMap[tmpTaskID]
            if taskSpec.useWorldCloud():
                # get destinations for WORLD cloud
                ddmIF = self.ddmIF.getInterface(taskSpec.vo)
                # get site
                siteSpec = self.siteMapper.getSite(tmpCoreName)
                # get nucleus
                nucleus = siteSpec.pandasite
                # get output/log datasets
                tmpStat,tmpDatasetSpecs = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(tmpTaskID,['output','log'])
                # get destinations
                retMap[tmpTaskID] = {'datasets':[],'nucleus':nucleus}
                for datasetSpec in tmpDatasetSpecs:
                    # skip distributed datasets
                    if DataServiceUtils.getDistributedDestination(datasetSpec.storageToken) != None:
                        continue
                    # get token
                    token = ddmIF.convertTokenToEndpoint(siteSpec.ddm,datasetSpec.storageToken)
                    # use default endpoint
                    if token == None:
                        token = siteSpec.ddm
                    # add original token
                    if not datasetSpec.storageToken in ['',None]:
                        token += '/{0}'.format(datasetSpec.storageToken)
                    retMap[tmpTaskID]['datasets'].append({'datasetID':datasetSpec.datasetID,
                                                          'token':'dst:{0}'.format(token),
                                                          'destination':tmpCoreName})
            else:
                retMap[tmpTaskID] = tmpCoreName
    tmpLog.debug('ret {0}'.format(str(retMap)))
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED,retMap
Example 6: doCheck
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doCheck(self, taskSpecList):
    # make logger
    tmpLog = MsgWrapper(logger)
    tmpLog.debug("start doCheck")
    # return for failure
    retFatal = self.SC_FATAL, {}
    retTmpError = self.SC_FAILED, {}
    # get list of jediTaskIDs
    taskIdList = []
    taskSpecMap = {}
    for taskSpec in taskSpecList:
        taskIdList.append(taskSpec.jediTaskID)
        taskSpecMap[taskSpec.jediTaskID] = taskSpec
    # check with panda
    tmpLog.debug("check with panda")
    tmpPandaStatus, cloudsInPanda = PandaClient.seeCloudTask(taskIdList)
    if tmpPandaStatus != 0:
        tmpLog.error("failed to see clouds")
        return retTmpError
    # make return map
    retMap = {}
    for tmpTaskID, tmpCoreName in cloudsInPanda.iteritems():
        tmpLog.debug("jediTaskID={0} -> {1}".format(tmpTaskID, tmpCoreName))
        if not tmpCoreName in ["NULL", "", None]:
            taskSpec = taskSpecMap[tmpTaskID]
            if taskSpec.useWorldCloud():
                # get destinations for WORLD cloud
                ddmIF = self.ddmIF.getInterface(taskSpec.vo)
                # get site
                siteSpec = self.siteMapper.getSite(tmpCoreName)
                # get output/log datasets
                tmpStat, tmpDatasetSpecs = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(
                    tmpTaskID, ["output", "log"]
                )
                # get destinations
                retMap[tmpTaskID] = []
                for datasetSpec in tmpDatasetSpecs:
                    token = ddmIF.convertTokenToEndpoint(siteSpec.ddm, datasetSpec.storageToken)
                    # use default endpoint
                    if token == None:
                        token = siteSpec.ddm
                    retMap[tmpTaskID].append(
                        {
                            "datasetID": datasetSpec.datasetID,
                            "token": "dst:{0}".format(token),
                            "destination": tmpCoreName,
                        }
                    )
            else:
                retMap[tmpTaskID] = tmpCoreName
    tmpLog.debug("ret {0}".format(str(retMap)))
    # return
    tmpLog.debug("done")
    return self.SC_SUCCEEDED, retMap
Example 7: start
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def start(self):
    # start base classes
    JediKnight.start(self)
    FactoryBase.initializeMods(self, self.taskBufferIF, self.ddmIF)
    # go into main loop
    while True:
        startTime = datetime.datetime.utcnow()
        try:
            # get logger
            tmpLog = MsgWrapper(logger)
            tmpLog.debug('start')
            # loop over all vos
            for vo in self.vos:
                # loop over all sourceLabels
                for prodSourceLabel in self.prodSourceLabels:
                    # get the list of tasks to refine
                    tmpList = self.taskBufferIF.getTasksToRefine_JEDI(vo, prodSourceLabel)
                    if tmpList == None:
                        # failed
                        tmpLog.error('failed to get the list of tasks to refine')
                    else:
                        tmpLog.debug('got {0} tasks'.format(len(tmpList)))
                        # put to a locked list
                        taskList = ListWithLock(tmpList)
                        # make thread pool
                        threadPool = ThreadPool()
                        # get work queue mapper
                        workQueueMapper = self.taskBufferIF.getWorkQueueMap()
                        # make workers
                        nWorker = jedi_config.taskrefine.nWorkers
                        for iWorker in range(nWorker):
                            thr = TaskRefinerThread(taskList, threadPool,
                                                    self.taskBufferIF,
                                                    self.ddmIF,
                                                    self, workQueueMapper)
                            thr.start()
                        # join
                        threadPool.join()
        except:
            errtype, errvalue = sys.exc_info()[:2]
            tmpLog.error('failed in {0}.start() with {1} {2}'.format(self.__class__.__name__,
                                                                     errtype.__name__, errvalue))
            tmpLog.error('Traceback: {0}'.format(traceback.format_exc()))
        # sleep if needed
        loopCycle = jedi_config.taskrefine.loopCycle
        timeDelta = datetime.datetime.utcnow() - startTime
        sleepPeriod = loopCycle - timeDelta.seconds
        if sleepPeriod > 0:
            time.sleep(sleepPeriod)
        # randomize cycle
        self.randomSleep()
Example 8: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    try:
        # get logger
        tmpLog = MsgWrapper(logger)
        tmpLog.debug('start')
        # action for priority boost
        self.doActionForPriorityBoost(tmpLog)
        # action for reassign
        self.doActionForReassgin(tmpLog)
    except:
        errtype,errvalue = sys.exc_info()[:2]
        tmpLog.error('failed with {0} {1}'.format(errtype,errvalue))
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 9: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    try:
        # get logger
        tmpLog = MsgWrapper(logger)
        tmpLog.debug('start')
        # action for priority boost
        self.doActionForPriorityBoost(tmpLog)
        # action for reassign
        self.doActionForReassgin(tmpLog)
        # action for throttled
        self.doActionForThrottled(tmpLog)
        # action for high prio pending
        for minPriority,timeoutVal in [(950,10),
                                       (900,30),
                                       ]:
            self.doActionForHighPrioPending(tmpLog,minPriority,timeoutVal)
    except:
        errtype,errvalue = sys.exc_info()[:2]
        tmpLog.error('failed with {0} {1}'.format(errtype,errvalue))
    # return
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 10: toBeThrottled
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def toBeThrottled(self, vo, prodSourceLabel, cloudName, workQueue, resourceType):
    # make logger
    tmpLog = MsgWrapper(logger)
    tmpLog.debug('start vo={0} label={1} cloud={2} workQueue={3}'.format(vo,prodSourceLabel,cloudName,
                                                                         workQueue.queue_name))
    # check if unthrottled
    if workQueue.queue_share == None:
        tmpLog.debug(" done : unthrottled since share=None")
        return self.retUnThrottled
    tmpLog.debug(" done : SKIP")
    return self.retThrottled
Example 11: runImpl
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def runImpl(self):
    # cutoff for disk in TB
    diskThreshold = 5 * 1024
    # dataset type to ignore file availability check
    datasetTypeToSkipCheck = ['log']
    thrInputSize = 1024*1024*1024
    thrInputNum = 100
    thrInputSizeFrac = 0.1
    thrInputNumFrac = 0.1
    cutOffRW = 50
    negWeightTape = 0.001
    # main
    lastJediTaskID = None
    siteMapper = self.taskBufferIF.getSiteMapper()
    while True:
        try:
            taskInputList = self.inputList.get(1)
            # no more datasets
            if len(taskInputList) == 0:
                self.logger.debug('{0} terminating after processing {1} tasks since no more inputs '.format(self.__class__.__name__,
                                                                                                             self.numTasks))
                return
            # loop over all tasks
            for taskSpec,inputChunk in taskInputList:
                lastJediTaskID = taskSpec.jediTaskID
                # make logger
                tmpLog = MsgWrapper(self.logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),monToken='{0}'.format(taskSpec.jediTaskID))
                tmpLog.debug('start')
                # get nuclei
                nucleusList = siteMapper.nuclei
                if taskSpec.nucleus in nucleusList:
                    candidateNucleus = taskSpec.nucleus
                else:
                    tmpLog.debug('got {0} candidates'.format(len(nucleusList)))
                    ######################################
                    # check status
                    newNucleusList = {}
                    for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
                        if not tmpNucleusSpec.state in ['ACTIVE']:
                            tmpLog.debug(' skip nucleus={0} due to status={1} criteria=-status'.format(tmpNucleus,
                                                                                                       tmpNucleusSpec.state))
                        else:
                            newNucleusList[tmpNucleus] = tmpNucleusSpec
                    nucleusList = newNucleusList
                    tmpLog.debug('{0} candidates passed status check'.format(len(nucleusList)))
                    if nucleusList == {}:
                        tmpLog.error('no candidates')
                        taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
                        self.sendLogMessage(tmpLog)
                        continue
                    ######################################
                    # check endpoint
                    newNucleusList = {}
                    tmpStat,tmpDatasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,
                                                                                                  ['output','log'])
                    for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
                        toSkip = False
                        for tmpDatasetSpec in tmpDatasetSpecList:
                            # ignore distributed datasets
                            if DataServiceUtils.getDistributedDestination(tmpDatasetSpec.storageToken) != None:
                                continue
                            # get endpoint with the pattern
                            tmpEP = tmpNucleusSpec.getAssoicatedEndpoint(tmpDatasetSpec.storageToken)
                            if tmpEP == None:
                                tmpLog.debug(' skip nucleus={0} since no endpoint with {1} criteria=-match'.format(tmpNucleus,
                                                                                                                   tmpDatasetSpec.storageToken))
                                toSkip = True
                                break
                            # check state
                            """
                            if not tmpEP['state'] in ['ACTIVE']:
                                tmpLog.debug(' skip nucleus={0} since endpoint {1} is in {2} criteria=-epstatus'.format(tmpNucleus,
                                                                                                                        tmpEP['ddm_endpoint_name'],
                                                                                                                        tmpEP['state']))
                                toSkip = True
                                break
                            """
                            # check space
                            tmpSpaceSize = tmpEP['space_free'] + tmpEP['space_expired']
                            if tmpSpaceSize < diskThreshold:
                                tmpLog.debug(' skip nucleus={0} since disk shortage ({1}<{2}) at endpoint {3} criteria=-space'.format(tmpNucleus,
                                                                                                                                      tmpSpaceSize,
                                                                                                                                      diskThreshold,
                                                                                                                                      tmpEP['state']))
                                toSkip = True
                                break
                        if not toSkip:
                            newNucleusList[tmpNucleus] = tmpNucleusSpec
                    nucleusList = newNucleusList
                    tmpLog.debug('{0} candidates passed endpoint check'.format(len(nucleusList)))
                    if nucleusList == {}:
                        tmpLog.error('no candidates')
                        taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
                        self.sendLogMessage(tmpLog)
                        continue
                    ######################################
                    # data locality
                    toSkip = False
                    availableData = {}
                    for datasetSpec in inputChunk.getDatasets():
                        #.........(remaining code omitted).........
Example 12: findMissingFiles
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def findMissingFiles(self,jediTaskID,cloudName):
    tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(jediTaskID))
    tmpLog.debug('start findMissingFiles')
    # return for failure
    retError = self.SC_FAILED
    # get datasets
    tmpSt,datasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(jediTaskID,['input'],True)
    if not tmpSt:
        tmpLog.error('failed to get the list of datasets')
        return retError
    # loop over all datasets
    for datasetSpec in datasetSpecList:
        # check only master dataset
        if not datasetSpec.isMaster():
            continue
        tmpLog.debug('checking {0}'.format(datasetSpec.datasetName))
        # get ddmIF
        ddmIF = self.ddmIF.getInterface(datasetSpec.vo)
        if ddmIF == None:
            tmpLog.error('failed to get DDM I/F for vo={0}'.format(datasetSpec.vo))
            return retError
        # get the list of sites where data is available
        tmpSt,tmpRet = AtlasBrokerUtils.getSitesWithData(self.siteMapper,ddmIF,
                                                         datasetSpec.datasetName)
        if tmpSt != self.SC_SUCCEEDED:
            tmpLog.error('failed to get the list of sites where {0} is available, since {1}'.format(datasetSpec.datasetName,
                                                                                                    tmpRet))
            return retError
        dataSiteMap = tmpRet
        # data is unavailable in cloud
        if not dataSiteMap.has_key(cloudName):
            tmpLog.error('{0} is unavailable in cloud={1} map={2}'.format(datasetSpec.datasetName,cloudName,str(dataSiteMap)))
            return retError
        # mapping between sites and storage endpoints
        checkedSites = [self.siteMapper.getCloud(cloudName)['source']]+dataSiteMap[cloudName]['t2']
        siteStorageEP = AtlasBrokerUtils.getSiteStorageEndpointMap(checkedSites,self.siteMapper)
        # get available files per site/endpoint
        tmpAvFileMap = ddmIF.getAvailableFiles(datasetSpec,
                                               siteStorageEP,
                                               self.siteMapper,
                                               ngGroup=[1],
                                               checkLFC=True)
        if tmpAvFileMap == None:
            tmpLog.error('failed to get available file list for {0}'.format(datasetSpec.datasetName))
            return retError
        # check availability
        missingFiles = []
        for fileSpec in datasetSpec.Files:
            fileFound = False
            for tmpSiteName,availableFilesMap in tmpAvFileMap.iteritems():
                for tmpStorageType,availableFiles in availableFilesMap.iteritems():
                    for availableFile in availableFiles:
                        if fileSpec.lfn == availableFile.lfn:
                            fileFound = True
                            break
                    if fileFound:
                        break
                if fileFound:
                    break
            # missing
            if not fileFound:
                missingFiles.append(fileSpec.fileID)
                tmpLog.debug('{0} missing'.format(fileSpec.lfn))
        # update contents
        if missingFiles != []:
            tmpSt = self.taskBufferIF.setMissingFiles_JEDI(jediTaskID,datasetSpec.datasetID,missingFiles)
            if not tmpSt:
                tmpLog.error('failed to set missing files in {0}'.format(datasetSpec.datasetName))
                return retError
    tmpLog.debug('done findMissingFiles')
    return self.SC_SUCCEEDED
Example 13: doAction
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doAction(self):
    try:
        # get logger
        tmpLog = MsgWrapper(logger)
        tmpLog.debug('start')
        origTmpLog = tmpLog
        # check every 60 min
        checkInterval = 60
        # get lib.tgz for waiting jobs
        libList = self.taskBufferIF.getLibForWaitingRunJob_JEDI(self.vo,self.prodSourceLabel,checkInterval)
        tmpLog.debug('got {0} lib.tgz files'.format(len(libList)))
        # activate or kill orphan jobs which were submitted to use lib.tgz when the lib.tgz was being produced
        for prodUserName,datasetName,tmpFileSpec in libList:
            tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(tmpFileSpec.jediTaskID))
            tmpLog.debug('start')
            # check status of lib.tgz
            if tmpFileSpec.status == 'failed':
                # get buildJob
                pandaJobSpecs = self.taskBufferIF.peekJobs([tmpFileSpec.PandaID],
                                                           fromDefined=False,
                                                           fromActive=False,
                                                           fromWaiting=False)
                pandaJobSpec = pandaJobSpecs[0]
                if pandaJobSpec != None:
                    # kill
                    self.taskBufferIF.updateJobs([pandaJobSpec],False)
                    tmpLog.debug(' killed downstream jobs for user="{0}" with libDS={1}'.format(prodUserName,datasetName))
                else:
                    # PandaJobSpec not found
                    tmpLog.error(' cannot find PandaJobSpec for user="{0}" with PandaID={1}'.format(prodUserName,
                                                                                                    tmpFileSpec.PandaID))
            elif tmpFileSpec.status == 'finished':
                # set metadata
                self.taskBufferIF.setGUIDs([{'guid':tmpFileSpec.GUID,
                                             'lfn':tmpFileSpec.lfn,
                                             'checksum':tmpFileSpec.checksum,
                                             'fsize':tmpFileSpec.fsize,
                                             'scope':tmpFileSpec.scope,
                                             }])
                # get lib dataset
                dataset = self.taskBufferIF.queryDatasetWithMap({'name':datasetName})
                if dataset != None:
                    # activate jobs
                    aThr = Activator(self.taskBufferIF,dataset)
                    aThr.start()
                    aThr.join()
                    tmpLog.debug(' activated downstream jobs for user="{0}" with libDS={1}'.format(prodUserName,datasetName))
                else:
                    # datasetSpec not found
                    tmpLog.error(' cannot find datasetSpec for user="{0}" with libDS={1}'.format(prodUserName,datasetName))
            else:
                # lib.tgz is not ready
                tmpLog.debug(' keep waiting for user="{0}" libDS={1}'.format(prodUserName,datasetName))
    except:
        tmpLog = origTmpLog
        errtype,errvalue = sys.exc_info()[:2]
        tmpLog.error('failed with {0} {1}'.format(errtype,errvalue))
    # return
    tmpLog = origTmpLog
    tmpLog.debug('done')
    return self.SC_SUCCEEDED
Example 14: doBrokerage
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doBrokerage(self, inputList, vo, prodSourceLabel, workQueue):
    # variables for submission
    maxBunchTask = 100
    # make logger
    tmpLog = MsgWrapper(logger)
    tmpLog.debug("start doBrokerage")
    # return for failure
    retFatal = self.SC_FATAL
    retTmpError = self.SC_FAILED
    tmpLog.debug("vo={0} label={1} queue={2}".format(vo, prodSourceLabel, workQueue.queue_name))
    # loop over all tasks
    allRwMap = {}
    prioMap = {}
    tt2Map = {}
    expRWs = {}
    jobSpecList = []
    for tmpJediTaskID, tmpInputList in inputList:
        for taskSpec, cloudName, inputChunk in tmpInputList:
            # make JobSpec to be submitted for TaskAssigner
            jobSpec = JobSpec()
            jobSpec.taskID = taskSpec.jediTaskID
            jobSpec.jediTaskID = taskSpec.jediTaskID
            # set managed to trigger TA
            jobSpec.prodSourceLabel = "managed"
            jobSpec.processingType = taskSpec.processingType
            jobSpec.workingGroup = taskSpec.workingGroup
            jobSpec.metadata = taskSpec.processingType
            jobSpec.assignedPriority = taskSpec.taskPriority
            jobSpec.currentPriority = taskSpec.currentPriority
            jobSpec.maxDiskCount = (taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize()) / 1024 / 1024
            if taskSpec.useWorldCloud():
                # use destinationSE to trigger task brokerage in WORLD cloud
                jobSpec.destinationSE = taskSpec.cloud
            prodDBlock = None
            setProdDBlock = False
            for datasetSpec in inputChunk.getDatasets():
                prodDBlock = datasetSpec.datasetName
                if datasetSpec.isMaster():
                    jobSpec.prodDBlock = datasetSpec.datasetName
                    setProdDBlock = True
                for fileSpec in datasetSpec.Files:
                    tmpInFileSpec = fileSpec.convertToJobFileSpec(datasetSpec)
                    jobSpec.addFile(tmpInFileSpec)
            # use secondary dataset name as prodDBlock
            if setProdDBlock == False and prodDBlock != None:
                jobSpec.prodDBlock = prodDBlock
            # append
            jobSpecList.append(jobSpec)
            prioMap[jobSpec.taskID] = jobSpec.currentPriority
            tt2Map[jobSpec.taskID] = jobSpec.processingType
            # get RW for a priority
            if not allRwMap.has_key(jobSpec.currentPriority):
                tmpRW = self.taskBufferIF.calculateRWwithPrio_JEDI(
                    vo, prodSourceLabel, workQueue, jobSpec.currentPriority
                )
                if tmpRW == None:
                    tmpLog.error("failed to calculate RW with prio={0}".format(jobSpec.currentPriority))
                    return retTmpError
                allRwMap[jobSpec.currentPriority] = tmpRW
            # get expected RW
            expRW = self.taskBufferIF.calculateTaskRW_JEDI(jobSpec.jediTaskID)
            if expRW == None:
                tmpLog.error("failed to calculate RW for jediTaskID={0}".format(jobSpec.jediTaskID))
                return retTmpError
            expRWs[jobSpec.taskID] = expRW
    # get fullRWs
    fullRWs = self.taskBufferIF.calculateRWwithPrio_JEDI(vo, prodSourceLabel, None, None)
    if fullRWs == None:
        tmpLog.error("failed to calculate full RW")
        return retTmpError
    # set metadata
    for jobSpec in jobSpecList:
        rwValues = allRwMap[jobSpec.currentPriority]
        jobSpec.metadata = "%s;%s;%s;%s;%s;%s" % (
            jobSpec.metadata,
            str(rwValues),
            str(expRWs),
            str(prioMap),
            str(fullRWs),
            str(tt2Map),
        )
    tmpLog.debug("run task assigner for {0} tasks".format(len(jobSpecList)))
    nBunchTask = 0
    while nBunchTask < len(jobSpecList):
        # get a bunch
        jobsBunch = jobSpecList[nBunchTask : nBunchTask + maxBunchTask]
        strIDs = "jediTaskID="
        for tmpJobSpec in jobsBunch:
            strIDs += "{0},".format(tmpJobSpec.taskID)
        strIDs = strIDs[:-1]
        tmpLog.debug(strIDs)
        # increment index
        nBunchTask += maxBunchTask
        # run task brokerage
        stS, outSs = PandaClient.runTaskAssignment(jobsBunch)
        tmpLog.debug("{0}:{1}".format(stS, str(outSs)))
    # return
    tmpLog.debug("done")
    return self.SC_SUCCEEDED
Example 15: doSplit
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import debug [as alias]
def doSplit(self,taskSpec,inputChunk,siteMapper):
    # return for failure
    retFatal = self.SC_FATAL,[]
    retTmpError = self.SC_FAILED,[]
    # make logger
    tmpLog = MsgWrapper(logger,'<jediTaskID={0} datasetID={1}>'.format(taskSpec.jediTaskID,inputChunk.masterIndexName))
    tmpLog.debug('start')
    if not inputChunk.isMerging:
        # set maxNumFiles using taskSpec if specified
        maxNumFiles = taskSpec.getMaxNumFilesPerJob()
        # set fsize gradients using taskSpec
        sizeGradients = taskSpec.getOutDiskSize()
        # set fsize intercepts using taskSpec
        sizeIntercepts = taskSpec.getWorkDiskSize()
        # walltime
        walltimeGradient = taskSpec.walltime
        # number of events per job if defined
        nEventsPerJob = taskSpec.getNumEventsPerJob()
        # number of files per job if defined
        nFilesPerJob = taskSpec.getNumFilesPerJob()
        if nFilesPerJob == None and nEventsPerJob == None and inputChunk.useScout() and not taskSpec.useLoadXML():
            nFilesPerJob = 1
        # grouping with boundaryID
        useBoundary = taskSpec.useGroupWithBoundaryID()
        # fsize intercepts per input size
        sizeGradientsPerInSize = None
        # max primary output size
        maxOutSize = None
        # max size per job
        maxSizePerJob = taskSpec.getMaxSizePerJob()
    else:
        # set parameters for merging
        maxNumFiles = taskSpec.getMaxNumFilesPerMergeJob()
        if maxNumFiles == None:
            maxNumFiles = 50
        sizeGradients = 0
        walltimeGradient = 0
        nFilesPerJob = taskSpec.getNumFilesPerMergeJob()
        nEventsPerJob = taskSpec.getNumEventsPerMergeJob()
        maxSizePerJob = None
        useBoundary = {'inSplit':3}
        # gradients per input size is 1
        sizeGradientsPerInSize = 1
        # intercepts for libDS
        sizeIntercepts = taskSpec.getWorkDiskSize()
        # margin of 500MB for merging
        interceptsMergin = 500 * 1024 * 1024
        if sizeIntercepts < interceptsMergin:
            sizeIntercepts = interceptsMergin
        maxOutSize = taskSpec.getMaxSizePerMergeJob()
        if maxOutSize == None:
            # max output size is 5GB for merging by default
            maxOutSize = 5 * 1024 * 1024 * 1024
    # LB
    respectLB = taskSpec.respectLumiblock()
    # dump
    tmpLog.debug('maxNumFiles={0} sizeGradients={1} sizeIntercepts={2} useBoundary={3}'.format(maxNumFiles,
                                                                                               sizeGradients,
                                                                                               sizeIntercepts,
                                                                                               useBoundary))
    tmpLog.debug('walltimeGradient={0} nFilesPerJob={1} nEventsPerJob={2}'.format(walltimeGradient,
                                                                                  nFilesPerJob,
                                                                                  nEventsPerJob))
    tmpLog.debug('sizeGradientsPerInSize={0} maxOutSize={1} respectLB={2}'.format(sizeGradientsPerInSize,
                                                                                  maxOutSize,
                                                                                  respectLB))
    # split
    returnList = []
    subChunks = []
    iSubChunks = 0
    nSubChunks = 50
    while True:
        # change site
        if iSubChunks % nSubChunks == 0:
            # append to return map
            if subChunks != []:
                returnList.append({'siteName':siteName,
                                   'subChunks':subChunks,
                                   'siteCandidate':siteCandidate,
                                   })
                # reset
                subChunks = []
            # new candidate
            siteCandidate = inputChunk.getOneSiteCandidate()
            siteName = siteCandidate.siteName
            siteSpec = siteMapper.getSite(siteName)
            # get maxSize if it is set in taskSpec
            maxSize = maxSizePerJob
            if maxSize == None or maxSize > (siteSpec.maxwdir * 1024 * 1024):
                # use maxwdir as the default maxSize
                maxSize = siteSpec.maxwdir * 1024 * 1024
            # max walltime
            maxWalltime = siteSpec.maxtime
            # core count
            if siteSpec.coreCount > 0:
                coreCount = siteSpec.coreCount
            else:
                coreCount = 1
            tmpLog.debug('chosen {0}'.format(siteName))
            tmpLog.debug('maxSize={0} maxWalltime={1} coreCount={2}'.format(maxSize,maxWalltime,coreCount))
            #.........(remaining code omitted).........