This article collects typical usage examples of the Python method pandajedi.jedicore.MsgWrapper.MsgWrapper.uploadLog. If you are unsure what MsgWrapper.uploadLog does or how to call it, the curated code examples below should help. You can also look further into usage examples of its enclosing class, pandajedi.jedicore.MsgWrapper.MsgWrapper.
The following shows 6 code examples of MsgWrapper.uploadLog, listed by popularity by default.
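All six examples follow the same pattern: wrap a module logger in MsgWrapper so that every message is prefixed with the JEDI task ID, and on a fatal brokerage condition call uploadLog and store its return value on the task via setErrDiag. The sketch below distills that pattern; the pick_candidates helper and the logger/taskSpec/candidates objects are hypothetical stand-ins, while the MsgWrapper constructor arguments and the uploadLog/setErrDiag calls are taken directly from the examples.
# Required import (as in the examples below)
from pandajedi.jedicore.MsgWrapper import MsgWrapper

def pick_candidates(logger, taskSpec, candidates):
    # prefix every log message with the JEDI task ID
    tmpLog = MsgWrapper(logger, '<jediTaskID={0}>'.format(taskSpec.jediTaskID))
    tmpLog.debug('start')
    if not candidates:
        tmpLog.error('no candidates')
        # as in the examples: record the string returned by uploadLog
        # as the task's error diagnostics
        taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
        return None
    tmpLog.debug('{0} candidates passed the check'.format(len(candidates)))
    return candidates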
Example 1: runImpl
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
def runImpl(self):
# cutoff for disk in TB
diskThreshold = 5 * 1024
# dataset type to ignore file availability check
datasetTypeToSkipCheck = ['log']
thrInputSize = 1024*1024*1024
thrInputNum = 100
thrInputSizeFrac = 0.1
thrInputNumFrac = 0.1
cutOffRW = 50
negWeightTape = 0.001
# main
lastJediTaskID = None
siteMapper = self.taskBufferIF.getSiteMapper()
while True:
try:
taskInputList = self.inputList.get(1)
# no more datasets
if len(taskInputList) == 0:
self.logger.debug('{0} terminating after processing {1} tasks since no more inputs '.format(self.__class__.__name__,
self.numTasks))
return
# loop over all tasks
for taskSpec,inputChunk in taskInputList:
lastJediTaskID = taskSpec.jediTaskID
# make logger
tmpLog = MsgWrapper(self.logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),monToken='{0}'.format(taskSpec.jediTaskID))
tmpLog.debug('start')
# get nuclei
nucleusList = siteMapper.nuclei
if taskSpec.nucleus in nucleusList:
candidateNucleus = taskSpec.nucleus
else:
tmpLog.debug('got {0} candidates'.format(len(nucleusList)))
######################################
# check status
newNucleusList = {}
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if not tmpNucleusSpec.state in ['ACTIVE']:
tmpLog.debug(' skip nucleus={0} due to status={1} criteria=-status'.format(tmpNucleus,
tmpNucleusSpec.state))
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.debug('{0} candidates passed status check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# check endpoint
newNucleusList = {}
tmpStat,tmpDatasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,
['output','log'])
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
toSkip = False
for tmpDatasetSpec in tmpDatasetSpecList:
# ignore distributed datasets
if DataServiceUtils.getDistributedDestination(tmpDatasetSpec.storageToken) != None:
continue
# get endpoint with the pattern
tmpEP = tmpNucleusSpec.getAssoicatedEndpoint(tmpDatasetSpec.storageToken)
if tmpEP == None:
tmpLog.debug(' skip nucleus={0} since no endpoint with {1} criteria=-match'.format(tmpNucleus,
tmpDatasetSpec.storageToken))
toSkip = True
break
# check state
"""
if not tmpEP['state'] in ['ACTIVE']:
tmpLog.debug(' skip nucleus={0} since endpoint {1} is in {2} criteria=-epstatus'.format(tmpNucleus,
tmpEP['ddm_endpoint_name'],
tmpEP['state']))
toSkip = True
break
"""
# check space
tmpSpaceSize = tmpEP['space_free'] + tmpEP['space_expired']
if tmpSpaceSize < diskThreshold:
tmpLog.debug(' skip nucleus={0} since disk shortage ({1}<{2}) at endpoint {3} criteria=-space'.format(tmpNucleus,
tmpSpaceSize,
diskThreshold,
tmpEP['state']))
toSkip = True
break
if not toSkip:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.debug('{0} candidates passed endpoint check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# data locality
toSkip = False
availableData = {}
for datasetSpec in inputChunk.getDatasets():
#......... (rest of this example's code omitted) .........
Example 2: doBrokerage
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
def doBrokerage(self,taskSpec,cloudName,inputChunk,taskParamMap):
# make logger
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID))
tmpLog.debug('start')
# return for failure
retFatal = self.SC_FATAL,inputChunk
retTmpError = self.SC_FAILED,inputChunk
# get sites in the cloud
if not taskSpec.site in ['',None]:
scanSiteList = [taskSpec.site]
tmpLog.debug('site={0} is pre-assigned'.format(taskSpec.site))
elif inputChunk.getPreassignedSite() != None:
scanSiteList = [inputChunk.getPreassignedSite()]
tmpLog.debug('site={0} is pre-assigned in masterDS'.format(inputChunk.getPreassignedSite()))
else:
scanSiteList = self.siteMapper.getCloud(cloudName)['sites']
tmpLog.debug('cloud=%s has %s candidates' % (cloudName,len(scanSiteList)))
tmpLog.debug('initial {0} candidates'.format(len(scanSiteList)))
######################################
# selection for status
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check site status
skipFlag = False
if tmpSiteSpec.status != 'online':
skipFlag = True
if not skipFlag:
newScanSiteList.append(tmpSiteName)
else:
tmpLog.debug(' skip %s due to status=%s' % (tmpSiteName,tmpSiteSpec.status))
scanSiteList = newScanSiteList
tmpLog.debug('{0} candidates passed site status check'.format(len(scanSiteList)))
if scanSiteList == []:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
return retTmpError
######################################
# selection for memory
minRamCount = max(taskSpec.ramCount, inputChunk.ramCount)
if not minRamCount in [0,None]:
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if tmpSiteSpec.maxmemory != 0 and minRamCount != 0 and minRamCount > tmpSiteSpec.maxmemory:
tmpLog.debug(' skip {0} due to site RAM shortage={1}(site upper limit) < {2}'.format(tmpSiteName,
tmpSiteSpec.maxmemory,
minRamCount))
continue
if tmpSiteSpec.minmemory != 0 and minRamCount != 0 and minRamCount < tmpSiteSpec.minmemory:
tmpLog.debug(' skip {0} due to job RAM shortage={1}(site lower limit) > {2}'.format(tmpSiteName,
tmpSiteSpec.minmemory,
minRamCount))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = newScanSiteList
tmpLog.debug('{0} candidates passed memory check ={1}{2}'.format(len(scanSiteList),
minRamCount,taskSpec.ramUnit))
if scanSiteList == []:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
return retTmpError
######################################
# selection for scratch disk
minDiskCountS = taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize() + inputChunk.getMaxAtomSize()
minDiskCountS = minDiskCountS / 1024 / 1024
# size for direct IO sites
if taskSpec.useLocalIO():
minDiskCountR = minDiskCountS
else:
minDiskCountR = taskSpec.getOutDiskSize() + taskSpec.getWorkDiskSize()
minDiskCountR = minDiskCountR / 1024 / 1024
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if tmpSiteSpec.maxwdir != 0:
if tmpSiteSpec.isDirectIO():
minDiskCount = minDiskCountR
else:
minDiskCount = minDiskCountS
if minDiskCount > tmpSiteSpec.maxwdir:
tmpLog.debug(' skip {0} due to small scratch disk={1} < {2}'.format(tmpSiteName,
tmpSiteSpec.maxwdir,
minDiskCount))
continue
newScanSiteList.append(tmpSiteName)
scanSiteList = newScanSiteList
tmpLog.debug('{0} candidates passed scratch disk check'.format(len(scanSiteList)))
if scanSiteList == []:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
return retTmpError
######################################
# selection for available space in SE
newScanSiteList = []
for tmpSiteName in scanSiteList:
# check at the site
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
#......... (rest of this example's code omitted) .........
Example 3: doBrokerage
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
def doBrokerage(self,taskSpec,cloudName,inputChunk,taskParamMap):
# make logger
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),
monToken='<jediTaskID={0} {1}>'.format(taskSpec.jediTaskID,
datetime.datetime.utcnow().isoformat('/')))
tmpLog.debug('start')
# return for failure
retFatal = self.SC_FATAL,inputChunk
retTmpError = self.SC_FAILED,inputChunk
# get primary site candidates
sitePreAssigned = False
excludeList = []
includeList = None
scanSiteList = []
# get list of site access
siteAccessList = self.taskBufferIF.listSiteAccess(None,taskSpec.userName)
siteAccessMap = {}
for tmpSiteName,tmpAccess in siteAccessList:
siteAccessMap[tmpSiteName] = tmpAccess
# site limitation
if taskSpec.useLimitedSites():
if 'excludedSite' in taskParamMap:
excludeList = taskParamMap['excludedSite']
# str to list for task retry
try:
if type(excludeList) != types.ListType:
excludeList = excludeList.split(',')
except:
pass
if 'includedSite' in taskParamMap:
includeList = taskParamMap['includedSite']
# str to list for task retry
if includeList == '':
includeList = None
try:
if type(includeList) != types.ListType:
includeList = includeList.split(',')
except:
pass
# loop over all sites
for siteName,tmpSiteSpec in self.siteMapper.siteSpecList.iteritems():
if tmpSiteSpec.type == 'analysis':
scanSiteList.append(siteName)
# preassigned
if not taskSpec.site in ['',None]:
# site is pre-assigned
tmpLog.debug('site={0} is pre-assigned'.format(taskSpec.site))
sitePreAssigned = True
if not taskSpec.site in scanSiteList:
scanSiteList.append(taskSpec.site)
tmpLog.debug('initial {0} candidates'.format(len(scanSiteList)))
# allowed remote access protocol
allowedRemoteProtocol = 'fax'
# MP
if taskSpec.coreCount != None and taskSpec.coreCount > 1:
# use MCORE only
useMP = 'only'
elif taskSpec.coreCount == 0:
# use MCORE and normal
useMP = 'any'
else:
# not use MCORE
useMP = 'unuse'
######################################
# selection for status
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check site status
skipFlag = False
if tmpSiteSpec.status in ['offline']:
skipFlag = True
elif tmpSiteSpec.status in ['brokeroff','test']:
if not sitePreAssigned:
skipFlag = True
elif tmpSiteName != taskSpec.site:
skipFlag = True
if not skipFlag:
newScanSiteList.append(tmpSiteName)
else:
tmpLog.debug(' skip site=%s due to status=%s criteria=-status' % (tmpSiteName,tmpSiteSpec.status))
scanSiteList = newScanSiteList
tmpLog.debug('{0} candidates passed site status check'.format(len(scanSiteList)))
if scanSiteList == []:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
######################################
# selection for MP
if not sitePreAssigned:
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check at the site
if useMP == 'any' or (useMP == 'only' and tmpSiteSpec.coreCount > 1) or \
(useMP =='unuse' and tmpSiteSpec.coreCount in [0,1,None]):
newScanSiteList.append(tmpSiteName)
else:
#......... (rest of this example's code omitted) .........
Example 4: doBrokerage
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
def doBrokerage(self,taskSpec,cloudName,inputChunk,taskParamMap):
# make logger
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),
monToken='<jediTaskID={0} {1}>'.format(taskSpec.jediTaskID,
datetime.datetime.utcnow().isoformat('/')))
tmpLog.debug('start')
# return for failure
retFatal = self.SC_FATAL,inputChunk
retTmpError = self.SC_FAILED,inputChunk
# get sites in the cloud
sitePreAssigned = False
siteListPreAssigned = False
if not taskSpec.site in ['',None]:
if ',' in taskSpec.site:
# site list
siteListPreAssigned = True
scanSiteList = taskSpec.site.split(',')
else:
# site
sitePreAssigned = True
scanSiteList = [taskSpec.site]
tmpLog.debug('site={0} is pre-assigned criteria=+preassign'.format(taskSpec.site))
elif inputChunk.getPreassignedSite() != None:
siteListPreAssigned = True
scanSiteList = DataServiceUtils.getSitesShareDDM(self.siteMapper,inputChunk.getPreassignedSite())
scanSiteList.append(inputChunk.getPreassignedSite())
tmpMsg = 'use site={0} since they share DDM endpoints with original_site={1} which is pre-assigned in masterDS '.format(str(scanSiteList),
inputChunk.getPreassignedSite())
tmpMsg += 'criteria=+premerge'
tmpLog.debug(tmpMsg)
else:
scanSiteList = self.siteMapper.getCloud(cloudName)['sites']
tmpLog.debug('cloud=%s has %s candidates' % (cloudName,len(scanSiteList)))
# get job statistics
tmpSt,jobStatMap = self.taskBufferIF.getJobStatisticsWithWorkQueue_JEDI(taskSpec.vo,taskSpec.prodSourceLabel)
if not tmpSt:
tmpLog.error('failed to get job statistics')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
# T1
if not taskSpec.useWorldCloud():
t1Sites = [self.siteMapper.getCloud(cloudName)['source']]
# hospital sites
if self.hospitalQueueMap.has_key(cloudName):
t1Sites += self.hospitalQueueMap[cloudName]
else:
# get destination for WORLD cloud
t1Sites = []
tmpStat,datasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,datasetTypes=['log'])
for datasetSpec in datasetSpecList:
if not datasetSpec.destination in t1Sites:
t1Sites.append(datasetSpec.destination)
# sites sharing SE with T1
sitesShareSeT1 = DataServiceUtils.getSitesShareDDM(self.siteMapper,t1Sites[0])
# all T1
allT1Sites = self.getAllT1Sites()
# core count
if inputChunk.isMerging and taskSpec.mergeCoreCount != None:
taskCoreCount = taskSpec.mergeCoreCount
else:
taskCoreCount = taskSpec.coreCount
# MP
if taskCoreCount != None and taskCoreCount > 1:
# use MCORE only
useMP = 'only'
elif taskCoreCount == 0:
# use MCORE and normal
useMP = 'any'
else:
# not use MCORE
useMP = 'unuse'
# get workQueue
workQueue = self.taskBufferIF.getWorkQueueMap().getQueueWithID(taskSpec.workQueue_ID)
######################################
# selection for status
if not sitePreAssigned:
newScanSiteList = []
for tmpSiteName in scanSiteList:
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# check site status
skipFlag = False
if tmpSiteSpec.status != 'online':
skipFlag = True
if not skipFlag:
newScanSiteList.append(tmpSiteName)
else:
tmpLog.debug(' skip site=%s due to status=%s criteria=-status' % (tmpSiteName,tmpSiteSpec.status))
scanSiteList = newScanSiteList
tmpLog.debug('{0} candidates passed site status check'.format(len(scanSiteList)))
if scanSiteList == []:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
######################################
# selection for reprocessing
if taskSpec.processingType == 'reprocessing':
newScanSiteList = []
for tmpSiteName in scanSiteList:
#......... (rest of this example's code omitted) .........
Example 5: doSetup
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
#......... (earlier part of this example's code omitted) .........
elif taskSpec.cloud != None:
# use T1 SE
tmpT1Name = siteMapper.getCloud(taskSpec.cloud)['source']
location = siteMapper.getDdmEndpoint(tmpT1Name,datasetSpec.storageToken)
else:
location = siteMapper.getDdmEndpoint(datasetSpec.site,datasetSpec.storageToken)
if locForRule == None:
locForRule = location
# set metadata
if taskSpec.prodSourceLabel in ['managed','test'] and targetName == datasetSpec.datasetName:
metaData = {}
metaData['task_id'] = taskSpec.jediTaskID
if not taskSpec.campaign in [None,'']:
metaData['campaign'] = taskSpec.campaign
if datasetSpec.getTransient() != None:
metaData['transient'] = datasetSpec.getTransient()
else:
metaData = None
# register dataset/container
tmpLog.info('registering {0} with location={1} backend={2} lifetime={3} meta={4}'.format(targetName,
location,
ddmBackEnd,
lifetime,
str(metaData)))
tmpStat = ddmIF.registerNewDataset(targetName,backEnd=ddmBackEnd,location=location,
lifetime=lifetime,metaData=metaData)
if not tmpStat:
tmpLog.error('failed to register {0}'.format(targetName))
return retFatal
# procedures for user
if userSetup or DataServiceUtils.getDistributedDestination(datasetSpec.storageToken) != None:
# register location
tmpToRegister = False
if userSetup and targetName == datasetSpec.datasetName and not datasetSpec.site in ['',None]:
userName = taskSpec.userName
grouping = None
tmpToRegister = True
elif DataServiceUtils.getDistributedDestination(datasetSpec.storageToken) != None:
userName = None
grouping = 'NONE'
tmpToRegister = True
if tmpToRegister:
activity = DataServiceUtils.getActivityForOut(taskSpec.prodSourceLabel)
tmpLog.info('registring location={0} lifetime={1}days activity={2} grouping={3}'.format(locForRule,lifetime,
activity,grouping))
tmpStat = ddmIF.registerDatasetLocation(targetName,locForRule,owner=userName,
lifetime=lifetime,backEnd=ddmBackEnd,
activity=activity,grouping=grouping)
if not tmpStat:
tmpLog.error('failed to register location {0} with {2} for {1}'.format(locForRule,
targetName,
ddmBackEnd))
return retFatal
avDatasetList.append(targetName)
else:
tmpLog.info('{0} already registered'.format(targetName))
# check if dataset is in the container
if datasetSpec.containerName != None and datasetSpec.containerName != datasetSpec.datasetName:
# get list of constituent datasets in the container
if not cnDatasetMap.has_key(datasetSpec.containerName):
cnDatasetMap[datasetSpec.containerName] = ddmIF.listDatasetsInContainer(datasetSpec.containerName)
# add dataset
if not datasetSpec.datasetName in cnDatasetMap[datasetSpec.containerName]:
tmpLog.info('adding {0} to {1}'.format(datasetSpec.datasetName,datasetSpec.containerName))
tmpStat = ddmIF.addDatasetsToContainer(datasetSpec.containerName,[datasetSpec.datasetName],
backEnd=ddmBackEnd)
if not tmpStat:
tmpLog.error('failed to add {0} to {1}'.format(datasetSpec.datasetName,
datasetSpec.containerName))
return retFatal
cnDatasetMap[datasetSpec.containerName].append(datasetSpec.datasetName)
else:
tmpLog.info('{0} already in {1}'.format(datasetSpec.datasetName,datasetSpec.containerName))
# update dataset
datasetSpec.status = 'registered'
self.taskBufferIF.updateDataset_JEDI(datasetSpec,{'jediTaskID':taskSpec.jediTaskID,
'datasetID':datasetID})
# open datasets
if taskSpec.prodSourceLabel in ['managed','test']:
# get the list of output/log datasets
outDatasetList = []
for tmpPandaJob in pandaJobs:
for tmpFileSpec in tmpPandaJob.Files:
if tmpFileSpec.type in ['output','log']:
if not tmpFileSpec.destinationDBlock in outDatasetList:
outDatasetList.append(tmpFileSpec.destinationDBlock)
# open datasets
for outDataset in outDatasetList:
tmpLog.info('open {0}'.format(outDataset))
ddmIF.openDataset(outDataset)
# unset lifetime
ddmIF.setDatasetMetadata(outDataset,'lifetime',None)
# return
tmpLog.info('done')
return retOK
except:
errtype,errvalue = sys.exc_info()[:2]
tmpLog.error('doSetup failed with {0}:{1}'.format(errtype.__name__,errvalue))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
return retFatal
Example 6: runImpl
# Required import: from pandajedi.jedicore.MsgWrapper import MsgWrapper [as alias]
# Or: from pandajedi.jedicore.MsgWrapper.MsgWrapper import uploadLog [as alias]
def runImpl(self):
# cutoff for disk in TB
diskThreshold = self.taskBufferIF.getConfigValue(self.msgType, 'DISK_THRESHOLD_{0}'.format(self.workQueue.queue_name),
'jedi', 'atlas')
if diskThreshold is None:
diskThreshold = 100 * 1024
# dataset type to ignore file availability check
datasetTypeToSkipCheck = ['log']
# thresholds for data availability check
thrInputSize = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_SIZE_THRESHOLD', 'jedi', 'atlas')
if thrInputSize is None:
thrInputSize = 1
thrInputSize *= 1024*1024*1024
thrInputNum = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_NUM_THRESHOLD', 'jedi', 'atlas')
if thrInputNum is None:
thrInputNum = 100
thrInputSizeFrac = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_SIZE_FRACTION', 'jedi', 'atlas')
if thrInputSizeFrac is None:
thrInputSizeFrac = 10
thrInputSizeFrac = float(thrInputSizeFrac) / 100
thrInputNumFrac = self.taskBufferIF.getConfigValue(self.msgType, 'INPUT_NUM_FRACTION', 'jedi', 'atlas')
if thrInputNumFrac is None:
thrInputNumFrac = 10
thrInputNumFrac = float(thrInputNumFrac) / 100
cutOffRW = 50
negWeightTape = 0.001
# main
lastJediTaskID = None
siteMapper = self.taskBufferIF.getSiteMapper()
while True:
try:
taskInputList = self.inputList.get(1)
# no more datasets
if len(taskInputList) == 0:
self.logger.debug('{0} terminating after processing {1} tasks since no more inputs '.format(self.__class__.__name__,
self.numTasks))
return
# loop over all tasks
for taskSpec,inputChunk in taskInputList:
lastJediTaskID = taskSpec.jediTaskID
# make logger
tmpLog = MsgWrapper(self.logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),monToken='jediTaskID={0}'.format(taskSpec.jediTaskID))
tmpLog.debug('start')
tmpLog.info('thrInputSize:{0} thrInputNum:{1} thrInputSizeFrac:{2} thrInputNumFrac;{3}'.format(thrInputSize,
thrInputNum,
thrInputSizeFrac,
thrInputNumFrac))
# RW
taskRW = self.taskBufferIF.calculateTaskWorldRW_JEDI(taskSpec.jediTaskID)
# get nuclei
nucleusList = siteMapper.nuclei
if taskSpec.nucleus in nucleusList:
candidateNucleus = taskSpec.nucleus
else:
tmpLog.info('got {0} candidates'.format(len(nucleusList)))
######################################
# check status
newNucleusList = {}
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
if not tmpNucleusSpec.state in ['ACTIVE']:
tmpLog.info(' skip nucleus={0} due to status={1} criteria=-status'.format(tmpNucleus,
tmpNucleusSpec.state))
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed status check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# check status of transfer backlog
t1Weight = taskSpec.getT1Weight()
if t1Weight < 0:
tmpLog.info('skip transfer backlog check due to negative T1Weight')
else:
newNucleusList = {}
backlogged_nuclei = self.taskBufferIF.getBackloggedNuclei()
for tmpNucleus, tmpNucleusSpec in nucleusList.iteritems():
if tmpNucleus in backlogged_nuclei:
tmpLog.info(' skip nucleus={0} due to long transfer backlog criteria=-transfer_backlog'.
format(tmpNucleus))
else:
newNucleusList[tmpNucleus] = tmpNucleusSpec
nucleusList = newNucleusList
tmpLog.info('{0} candidates passed transfer backlog check'.format(len(nucleusList)))
if nucleusList == {}:
tmpLog.error('no candidates')
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
continue
######################################
# check endpoint
fractionFreeSpace = {}
newNucleusList = {}
tmpStat,tmpDatasetSpecList = self.taskBufferIF.getDatasetsWithJediTaskID_JEDI(taskSpec.jediTaskID,
['output','log'])
for tmpNucleus,tmpNucleusSpec in nucleusList.iteritems():
toSkip = False
#......... (rest of this example's code omitted) .........