This page collects typical usage examples of the Python method taskbuffer.JobSpec.JobSpec.AtlasRelease. If you are unsure how JobSpec.AtlasRelease is used in practice, the curated code samples below should help. You can also look at the containing class, taskbuffer.JobSpec.JobSpec, for further context.
The following presents 15 code examples of the JobSpec.AtlasRelease method, ordered by popularity.
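Before working through the examples, here is a minimal sketch of the common pattern they all share: create a JobSpec, set AtlasRelease alongside the other job attributes, attach FileSpec objects, and submit through the PanDA client. It is distilled from Example 6 below (Python 2, as in all snippets on this page); the release tag, dataset, LFN and storage-element names are placeholder values copied from that example, and a configured PanDA client environment (userinterface.Client, taskbuffer) is assumed.

# Minimal usage sketch (assumes a configured PanDA client environment; values are placeholders from Example 6).
import time
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec

job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-9.0.4'                    # release the job runs against
job.prodDBlock = 'pandatest.000003.dd.input'        # input dataset (placeholder)
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('uuidgen')
job.destinationSE = 'BNL_SE'                        # destination storage element (placeholder)

fileI = FileSpec()                                  # one input file of the dataset
fileI.lfn = 'pandatest.000003.dd.input._00028.junk'
fileI.dataset = job.prodDBlock
fileI.type = 'input'
job.addFile(fileI)

s, o = Client.submitJobs([job])                     # returns (status code, per-job results)
print s
print o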
Example 1: defineEvgen16Job
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
def defineEvgen16Job(self, i):
    """Define an Evgen16 job based on predefined values and randomly generated names
    """
    job = JobSpec()
    job.computingSite = self.__site
    job.cloud = self.__cloud
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (uuid.uuid1(), i)
    job.AtlasRelease = 'Atlas-16.6.2'
    job.homepackage = 'AtlasProduction/16.6.2.1'
    job.transformation = 'Evgen_trf.py'
    job.destinationDBlock = self.__datasetName
    job.destinationSE = self.__destName
    job.currentPriority = 10000
    job.prodSourceLabel = 'test'
    job.cmtConfig = 'i686-slc5-gcc43-opt'
    # Output file
    fileO = FileSpec()
    fileO.lfn = "%s.evgen.pool.root" % job.jobName
    fileO.destinationDBlock = job.destinationDBlock
    fileO.destinationSE = job.destinationSE
    fileO.dataset = job.destinationDBlock
    fileO.destinationDBlockToken = 'ATLASDATADISK'
    fileO.type = 'output'
    job.addFile(fileO)
    # Log file
    fileL = FileSpec()
    fileL.lfn = "%s.job.log.tgz" % job.jobName
    fileL.destinationDBlock = job.destinationDBlock
    fileL.destinationSE = job.destinationSE
    fileL.dataset = job.destinationDBlock
    fileL.destinationDBlockToken = 'ATLASDATADISK'
    fileL.type = 'log'
    job.addFile(fileL)
    job.jobParameters = "2760 105048 19901 101 200 MC10.105048.PythiaB_ccmu3mu1X.py %s NONE NONE NONE MC10JobOpts-latest-test.tar.gz" % fileO.lfn
    return job
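The method above only constructs the JobSpec; submission happens separately. A sketch of how such jobs could then be submitted, following the Client.submitJobs pattern of Example 6 below and assuming this code runs inside the same class that defines __site, __cloud, __datasetName and __destName:

    # Sketch only: build a handful of Evgen16 jobs and submit them together.
    import userinterface.Client as Client
    jobList = [self.defineEvgen16Job(i) for i in range(3)]
    s, o = Client.submitJobs(jobList)   # (status code, per-job results), as in Example 6
    print s, o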
Example 2: master_prepare
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
# ... (earlier part of this method omitted) ...
    ## if cloudSite==eachCloud:
    ##     cloud=cloudID
    ##     outclouds.append(cloud)
    ##     break
    ## print outclouds
    ## # finally, matching with user's wishes
    ## if len(outclouds)>0:
    ##     if not job.backend.requirements.cloud: # no user wish, update
    ##         job.backend.requirements.cloud=outclouds[0]
    ##     else:
    ##         try:
    ##             assert job.backend.requirements.cloud in outclouds
    ##         except:
    ##             raise ApplicationConfigurationError(None,'Input dataset not available in target cloud %s. Please try any of the following %s' % (job.backend.requirements.cloud, str(outclouds)))
    from GangaPanda.Lib.Panda.Panda import runPandaBrokerage
    runPandaBrokerage(job)
    if job.backend.site == 'AUTO':
        raise ApplicationConfigurationError(None,'site is still AUTO after brokerage!')
    # output dataset preparation and registration
    try:
        outDsLocation = Client.PandaSites[job.backend.site]['ddm']
    except:
        raise ApplicationConfigurationError(None,"Could not extract output dataset location from job.backend.site value: %s. Aborting" % job.backend.site)
    if not app.dryrun:
        for outtype in app.outputpaths.keys():
            dset=string.replace(app.outputpaths[outtype],"/",".")
            dset=dset[1:]
            # dataset registration must be done only once.
            print "registering output dataset %s at %s" % (dset,outDsLocation)
            try:
                Client.addDataset(dset,False,location=outDsLocation)
                dq2_set_dataset_lifetime(dset, location=outDsLocation)
            except:
                raise ApplicationConfigurationError(None,"Fail to create output dataset %s. Aborting" % dset)
        # extend registration to build job lib dataset:
        print "registering output dataset %s at %s" % (self.libDataset,outDsLocation)
        try:
            Client.addDataset(self.libDataset,False,location=outDsLocation)
            dq2_set_dataset_lifetime(self.libDataset, outDsLocation)
        except:
            raise ApplicationConfigurationError(None,"Fail to create output dataset %s. Aborting" % self.libDataset)
    ###
    cacheVer = "-AtlasProduction_" + str(app.prod_release)
    logger.debug("master job submit?")
    self.outsite=job.backend.site
    if app.se_name and app.se_name != "none" and not self.outsite:
        self.outsite=app.se_name
    # create build job
    jspec = JobSpec()
    jspec.jobDefinitionID = job.id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_rel
    jspec.homepackage = 'AnalysisTransforms'+cacheVer#+nightVer
    jspec.transformation = '%s/buildJob-00-00-03' % Client.baseURLSUB # common base to Athena and AthenaMC jobs: buildJob is a pilot job which takes care of all inputs for the real jobs (in prepare()
    jspec.destinationDBlock = self.libDataset
    jspec.destinationSE = job.backend.site
    jspec.prodSourceLabel = 'panda'
    jspec.assignedPriority = 2000
    jspec.computingSite = job.backend.site
    jspec.cloud = job.backend.requirements.cloud
    # jspec.jobParameters = self.args not known yet
    jspec.jobParameters = '-o %s' % (self.library)
    if app.userarea:
        print app.userarea
        jspec.jobParameters += ' -i %s' % (os.path.basename(app.userarea))
    else:
        jspec.jobParameters += ' -i %s' % (sources)
    jspec.cmtConfig = AthenaUtils.getCmtConfig(athenaVer=app.atlas_rel)
    matchURL = re.search('(http.*://[^/]+)/',Client.baseURLSSL)
    if matchURL:
        jspec.jobParameters += ' --sourceURL %s' % matchURL.group(1)
    fout = FileSpec()
    fout.lfn = self.library
    fout.type = 'output'
    fout.dataset = self.libDataset
    fout.destinationDBlock = self.libDataset
    jspec.addFile(fout)
    flog = FileSpec()
    flog.lfn = '%s.log.tgz' % self.libDataset
    flog.type = 'log'
    flog.dataset = self.libDataset
    flog.destinationDBlock = self.libDataset
    jspec.addFile(flog)
    #print "MASTER JOB DETAILS:",jspec.jobParameters
    return jspec
Example 3: prepare
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
def prepare(self,app,appconfig,appmasterconfig,jobmasterconfig):
    '''prepare the subjob specific configuration'''
    # PandaTools
    from pandatools import Client
    from pandatools import AthenaUtils
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec
    job = app._getParent()
    logger.debug('AthenaMCPandaRTHandler prepare called for %s', job.getFQID('.'))
    try:
        assert self.outsite
    except:
        logger.error("outsite not set. Aborting")
        raise Exception()
    job.backend.site = self.outsite
    job.backend.actualCE = self.outsite
    cloud = job._getRoot().backend.requirements.cloud
    job.backend.requirements.cloud = cloud
    # now just filling the job from AthenaMC data
    jspec = JobSpec()
    jspec.jobDefinitionID = job._getRoot().id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_rel
    if app.transform_archive:
        jspec.homepackage = 'AnalysisTransforms'+app.transform_archive
    elif app.prod_release:
        jspec.homepackage = 'AnalysisTransforms-AtlasProduction_'+str(app.prod_release)
    jspec.transformation = '%s/runAthena-00-00-11' % Client.baseURLSUB
    #---->???? prodDBlock and destinationDBlock when facing several input / output datasets?
    jspec.prodDBlock = 'NULL'
    if job.inputdata and len(app.inputfiles)>0 and app.inputfiles[0] in app.dsetmap:
        jspec.prodDBlock = app.dsetmap[app.inputfiles[0]]
    # How to specify jspec.destinationDBlock when more than one type of output is available? Panda prod jobs seem to specify only the last output dataset
    outdset=""
    for type in ["EVNT","RDO","HITS","AOD","ESD","NTUP"]:
        if type in app.outputpaths.keys():
            outdset=string.replace(app.outputpaths[type],"/",".")
            outdset=outdset[1:-1]
            break
    if not outdset:
        try:
            assert len(app.outputpaths.keys())>0
        except:
            logger.error("app.outputpaths is empty: check your output datasets")
            raise
        type=app.outputpaths.keys()[0]
        outdset=string.replace(app.outputpaths[type],"/",".")
        outdset=outdset[1:-1]
    jspec.destinationDBlock = outdset
    jspec.destinationSE = self.outsite
    jspec.prodSourceLabel = 'user'
    jspec.assignedPriority = 1000
    jspec.cloud = cloud
    # memory
    if job.backend.requirements.memory != -1:
        jspec.minRamCount = job.backend.requirements.memory
    jspec.computingSite = self.outsite
    jspec.cmtConfig = AthenaUtils.getCmtConfig(athenaVer=app.atlas_rel)
    # library (source files)
    flib = FileSpec()
    flib.lfn = self.library
    # flib.GUID =
    flib.type = 'input'
    # flib.status =
    flib.dataset = self.libDataset
    flib.dispatchDBlock = self.libDataset
    jspec.addFile(flib)
    # input files FIXME: many more input types
    for lfn in app.inputfiles:
        useguid=app.turls[lfn].replace("guid:","")
        finp = FileSpec()
        finp.lfn = lfn
        finp.GUID = useguid
        finp.dataset = app.dsetmap[lfn]
        finp.prodDBlock = app.dsetmap[lfn]
        finp.prodDBlockToken = 'local'
        finp.dispatchDBlock = app.dsetmap[lfn]
        finp.type = 'input'
        finp.status = 'ready'
        jspec.addFile(finp)
    # add dbfiles if any:
    for lfn in app.dbfiles:
        useguid=app.dbturls[lfn].replace("guid:","")
        finp = FileSpec()
        finp.lfn = lfn
        finp.GUID = useguid
        finp.dataset = app.dsetmap[lfn]
# ... (remainder of this method omitted) ...
Example 4: len
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
if len(sys.argv)>1:
    site = sys.argv[1]
else:
    site = None
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
jobList = []
for i in range(20):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = commands.getoutput('uuidgen')
    job.AtlasRelease = 'Atlas-11.0.41'
    #job.AtlasRelease = 'Atlas-11.0.3'
    job.homepackage = 'AnalysisTransforms'
    job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 100
    job.prodSourceLabel = 'user'
    job.computingSite = site
    #job.prodDBlock = "pandatest.b1599dfa-cd36-4fc5-92f6-495781a94c66"
    job.prodDBlock = "pandatest.f228b051-077b-4f81-90bf-496340644379"
    fileI = FileSpec()
    fileI.dataset = job.prodDBlock
    fileI.prodDBlock = job.prodDBlock
    fileI.lfn = "lib.f228b051-077b-4f81-90bf-496340644379.tgz"
Example 5: run
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
# ... (earlier part of this method omitted) ...
            if not tmpSiteID.startswith('ANALY_'):
                continue
            # remove test and local
            if re.search('_test',tmpSiteID,re.I) != None:
                continue
            if re.search('_local',tmpSiteID,re.I) != None:
                continue
            # avoid same site
            if self.avoidSameSite and self.getAggName(tmpSiteSpec.ddm) == origSiteDDM:
                continue
            # check DQ2 ID
            if self.cloud in [None,tmpSiteSpec.cloud] \
                   and (self.getAggName(tmpSiteSpec.ddm) in maxDQ2Sites or inputDS == []):
                # excluded sites
                excludedFlag = False
                for tmpExcSite in self.excludedSite:
                    if re.search(tmpExcSite,tmpSiteID) != None:
                        excludedFlag = True
                        break
                if excludedFlag:
                    _logger.debug("%s skip %s since excluded" % (self.token,tmpSiteID))
                    continue
                # use online only
                if tmpSiteSpec.status != 'online':
                    _logger.debug("%s skip %s status=%s" % (self.token,tmpSiteID,tmpSiteSpec.status))
                    continue
                # check maxinputsize
                if (maxFileSize == None and origMaxInputSize > siteMapper.getSite(tmpSiteID).maxinputsize) or \
                       maxFileSize > siteMapper.getSite(tmpSiteID).maxinputsize:
                    _logger.debug("%s skip %s due to maxinputsize" % (self.token,tmpSiteID))
                    continue
                # append
                if not tmpSiteID in maxPandaSites:
                    maxPandaSites.append(tmpSiteID)
        # choose at most 20 sites randomly to avoid too many lookup
        random.shuffle(maxPandaSites)
        maxPandaSites = maxPandaSites[:20]
        _logger.debug("%s candidate PandaSites -> %s" % (self.token,str(maxPandaSites)))
        # no Panda siteIDs
        if maxPandaSites == []:
            _logger.debug("%s no Panda site candidate" % self.token)
        else:
            # set AtlasRelease and cmtConfig to dummy job
            tmpJobForBrokerage = JobSpec()
            if self.job.AtlasRelease in ['NULL',None]:
                tmpJobForBrokerage.AtlasRelease = ''
            else:
                tmpJobForBrokerage.AtlasRelease = self.job.AtlasRelease
            # use nightlies
            matchNight = re.search('^AnalysisTransforms-.*_(rel_\d+)$',self.job.homepackage)
            if matchNight != None:
                tmpJobForBrokerage.AtlasRelease += ':%s' % matchNight.group(1)
            # use cache
            else:
                matchCache = re.search('^AnalysisTransforms-([^/]+)',self.job.homepackage)
                if matchCache != None:
                    tmpJobForBrokerage.AtlasRelease = matchCache.group(1).replace('_','-')
            if not self.job.cmtConfig in ['NULL',None]:
                tmpJobForBrokerage.cmtConfig = self.job.cmtConfig
            # memory size
            if not self.job.minRamCount in ['NULL',None,0]:
                tmpJobForBrokerage.minRamCount = self.job.minRamCount
            # CPU count
            if not self.job.maxCpuCount in ['NULL',None,0]:
                tmpJobForBrokerage.maxCpuCount = self.job.maxCpuCount
            # run brokerage
            brokerage.broker.schedule([tmpJobForBrokerage],self.taskBuffer,siteMapper,forAnalysis=True,
                                      setScanSiteList=maxPandaSites,trustIS=True,reportLog=True)
            newSiteID = tmpJobForBrokerage.computingSite
            self.brokerageInfo += tmpJobForBrokerage.brokerageErrorDiag
            _logger.debug("%s runBrokerage - > %s" % (self.token,newSiteID))
            # unknown site
            if not siteMapper.checkSite(newSiteID):
                _logger.error("%s unknown site" % self.token)
                _logger.debug("%s failed" % self.token)
                return
            # get new site spec
            newSiteSpec = siteMapper.getSite(newSiteID)
            # avoid repetition
            if self.getAggName(newSiteSpec.ddm) == origSiteDDM:
                _logger.debug("%s assigned to the same site %s " % (self.token,newSiteID))
                _logger.debug("%s end" % self.token)
                return
            # simulation mode
            if self.simulation:
                _logger.debug("%s end simulation" % self.token)
                return
            # prepare jobs
            status = self.prepareJob(newSiteID,newSiteSpec)
            if status:
                # run SetUpper
                statusSetUp = self.runSetUpper()
                if not statusSetUp:
                    _logger.debug("%s runSetUpper failed" % self.token)
                else:
                    _logger.debug("%s successfully assigned to %s" % (self.token,newSiteID))
            _logger.debug("%s end" % self.token)
    except:
        errType,errValue,errTraceBack = sys.exc_info()
        _logger.error("%s run() : %s %s" % (self.token,errType,errValue))
Example 6: JobSpec
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
import time
import commands
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('/usr/bin/uuidgen')
job.AtlasRelease = 'Atlas-9.0.4'
job.prodDBlock = 'pandatest.000003.dd.input'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('/usr/bin/uuidgen')
job.destinationSE = 'BNL_SE'
ids = {'pandatest.000003.dd.input._00028.junk':'6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
       'pandatest.000003.dd.input._00033.junk':'98f79ba1-1793-4253-aac7-bdf90a51d1ee',
       'pandatest.000003.dd.input._00039.junk':'33660dd5-7cef-422a-a7fc-6c24cb10deb1'}
for lfn in ids.keys():
    file = FileSpec()
    file.lfn = lfn
    file.GUID = ids[file.lfn]
    file.dataset = 'pandatest.000003.dd.input'
    file.type = 'input'
    job.addFile(file)
s,o = Client.submitJobs([job])
print "---------------------"
print s
print o
print "---------------------"
s,o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
Example 7: JobSpec
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
files = {
    inputFile:None,
    }
jobList = []
index = 0
for lfn in files.keys():
    index += 1
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
    job.AtlasRelease = 'Atlas-17.0.5'
    job.homepackage = 'AtlasProduction/17.0.5.6'
    job.transformation = 'AtlasG4_trf.py'
    job.destinationDBlock = datasetName
    job.computingSite = site
    job.prodDBlock = prodDBlock
    job.prodSourceLabel = 'test'
    job.processingType = 'test'
    job.currentPriority = 10000
    job.cloud = cloud
    job.cmtConfig = 'i686-slc5-gcc43-opt'
    fileI = FileSpec()
    fileI.dataset = job.prodDBlock
    fileI.prodDBlock = job.prodDBlock
Example 8: JobSpec
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = None
files = {
    'daq.ATLAS.0092045.physics.RPCwBeam.LB0016.SFO-2._0009.data':None,
    }
jobList = []
index = 0
for lfn in files.keys():
    index += 1
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
    job.AtlasRelease = 'Atlas-14.4.0'
    job.homepackage = 'AtlasTier0/14.4.0.2'
    job.transformation = 'Reco_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.computingSite = site
    job.prodDBlock = 'data08_cos.00092045.physics_RPCwBeam.daq.RAW.o4_T1224560091'
    job.prodSourceLabel = 'test'
    job.processingType = 'reprocessing'
    job.currentPriority = 10000
    job.cloud = cloud
    job.cmtConfig = 'i686-slc4-gcc34-opt'
    origParams = """inputBSFile=daq.ATLAS.0092045.physics.RPCwBeam.LB0016.SFO-2._0009.data maxEvents=5 skipEvents=0 autoConfiguration=FieldAndGeo preInclude=RecExCommission/RecExCommission.py,RecExCommission/MinimalCommissioningSetup.py,RecJobTransforms/UseOracle.py preExec="jetFlags.Enabled.set_Value_and_Lock(False)" DBRelease=DBRelease-6.2.1.5.tar.gz conditionsTag=COMCOND-ES1C-000-00 RunNumber=92045 beamType=cosmics AMITag=r595 projectName=data08_cos trigStream=physics_RPCwBeam outputTypes=DPDCOMM outputESDFile=ESD.029868._01110.pool.root outputTAGComm=TAG_COMM.029868._01110.pool.root outputAODFile=AOD.029868._01110.pool.root outputMergedDQMonitorFile=DQM_MERGED.029868._01110.root DPD_PIXELCOMM=DPD_PIXELCOMM.029868._01110.pool.root DPD_SCTCOMM=DPD_SCTCOMM.029868._01110.pool.root DPD_IDCOMM=DPD_IDCOMM.029868._01110.pool.root DPD_IDPROJCOMM=DPD_IDPROJCOMM.029868._01110.pool.root DPD_CALOCOMM=DPD_CALOCOMM.029868._01110.pool.root DPD_TILECOMM=DPD_TILECOMM.029868._01110.pool.root DPD_EMCLUSTCOMM=DPD_EMCLUSTCOMM.029868._01110.pool.root DPD_EGAMMACOMM=DPD_EGAMMACOMM.029868._01110.pool.root DPD_RPCCOMM=DPD_RPCCOMM.029868._01110.pool.root DPD_TGCCOMM=DPD_TGCCOMM.029868._01110.pool.root --ignoreunknown"""
Example 9: len
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
if len(sys.argv)>1:
    site = sys.argv[1]
else:
    site = None
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
#destName = 'BNL_SE'
jobList = []
for i in [999905,999906,999907]:
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),i)
    job.AtlasRelease = 'Atlas-14.1.0'
    job.homepackage = 'AtlasProduction/12.0.6.2'
    job.transformation = 'csc_evgen_trf.py'
    job.destinationDBlock = datasetName
    #job.destinationSE = destName
    job.currentPriority = 1000
    job.prodSourceLabel = 'managed'
    #job.prodSourceLabel = 'test'
    #job.computingSite = site
    job.cmtConfig = 'i686-slc4-gcc34-opt'
    job.metadata = 'evgen;%s;%s;%s' % (str({'FR': 46, 'NL': 45, 'NDGF': 300, 'CERN': 19, 'TW': 44110, 'CA': 2922, 'DE': 9903, 'IT': 1168, 'US': 6226, 'UK': 1026, 'ES': 26619}),str({999907:100,999906:200,999905:300}),str({999905:100,999906:910,999907:500}))
    #job.metadata = 'evgen;%s' % str({'FR': 46, 'NL': 45, 'NDGF': 300, 'CERN': 19, 'TW': 44110, 'CA': 2922, 'DE': 9903, 'IT': 1168, 'US': 6226, 'UK': 1026, 'ES': 26619})
    #job.cloud = "UK"
    job.taskID = i
Example 10: JobSpec
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
# instantiate JobSpecs
iJob = 0
jobList = []
for line in taskFile:
    iJob += 1
    job = JobSpec()
    # job ID ###### FIXME
    job.jobDefinitionID = int(time.time()) % 10000
    # job name
    job.jobName = "%s_%05d.job" % (taskName,iJob)
    # AtlasRelease
    if len(re.findall('\.',trfVer)) > 2:
        match = re.search('^(\d+\.\d+\.\d+)',trfVer)
        job.AtlasRelease = 'Atlas-%s' % match.group(1)
    else:
        job.AtlasRelease = 'Atlas-%s' % trfVer
    # homepackage
    vers = trfVer.split('.')
    if int(vers[0]) <= 11:
        job.homepackage = 'JobTransforms'
        for ver in vers:
            job.homepackage += "-%02d" % int(ver)
    else:
        job.homepackage = 'AtlasProduction/%s' % trfVer
    # trf
    job.transformation = trf
    job.destinationDBlock = oDatasets[0]
    # prod DBlock
    job.prodDBlock = iDataset
Example 11: JobSpec
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
datasetName = 'panda.destDB.%s' % commands.getoutput('uuidgen')
destName = 'BNL_ATLAS_2'
files = {
    'EVNT.019128._00011.pool.root.1':None,
    }
jobList = []
index = 0
for lfn in files.keys():
    index += 1
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
    job.AtlasRelease = 'Atlas-13.0.40'
    job.homepackage = 'AtlasProduction/13.0.40.3'
    job.transformation = 'csc_simul_trf.py'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.computingSite = site
    job.prodDBlock = 'valid1.005001.pythia_minbias.evgen.EVNT.e306_tid019128'
    job.prodSourceLabel = 'test'
    job.currentPriority = 10000
    job.cloud = 'IT'
    fileI = FileSpec()
    fileI.dataset = job.prodDBlock
    fileI.prodDBlock = job.prodDBlock
    fileI.lfn = lfn
Example 12: prepare
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig):
    """Prepare the specific aspec of each subjob.
    Returns: subjobconfig list of objects understood by backends."""
    from pandatools import Client
    from pandatools import AthenaUtils
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec
    from GangaAtlas.Lib.ATLASDataset.DQ2Dataset import dq2_set_dataset_lifetime
    from GangaPanda.Lib.Panda.Panda import refreshPandaSpecs
    # make sure we have the correct siteType
    refreshPandaSpecs()
    job = app._getParent()
    masterjob = job._getRoot()
    logger.debug('ProdTransPandaRTHandler prepare called for %s',
                 job.getFQID('.'))
    job.backend.actualCE = job.backend.site
    job.backend.requirements.cloud = Client.PandaSites[job.backend.site]['cloud']
    # check that the site is in a submit-able status
    if not job.splitter or job.splitter._name != 'DQ2JobSplitter':
        allowed_sites = job.backend.list_ddm_sites()
    try:
        outDsLocation = Client.PandaSites[job.backend.site]['ddm']
        tmpDsExist = False
        if (configPanda['processingType'].startswith('gangarobot') or configPanda['processingType'].startswith('hammercloud')):
            #if Client.getDatasets(job.outputdata.datasetname):
            if getDatasets(job.outputdata.datasetname):
                tmpDsExist = True
                logger.info('Re-using output dataset %s'%job.outputdata.datasetname)
        if not configPanda['specialHandling']=='ddm:rucio' and not configPanda['processingType'].startswith('gangarobot') and not configPanda['processingType'].startswith('hammercloud') and not configPanda['processingType'].startswith('rucio_test'):
            Client.addDataset(job.outputdata.datasetname,False,location=outDsLocation,allowProdDisk=True,dsExist=tmpDsExist)
        logger.info('Output dataset %s registered at %s'%(job.outputdata.datasetname,outDsLocation))
        dq2_set_dataset_lifetime(job.outputdata.datasetname, outDsLocation)
    except exceptions.SystemExit:
        raise BackendError('Panda','Exception in adding dataset %s: %s %s'%(job.outputdata.datasetname,sys.exc_info()[0],sys.exc_info()[1]))
    # JobSpec.
    jspec = JobSpec()
    jspec.currentPriority = app.priority
    jspec.jobDefinitionID = masterjob.id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.coreCount = app.core_count
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_release
    jspec.homepackage = app.home_package
    jspec.transformation = app.transformation
    jspec.destinationDBlock = job.outputdata.datasetname
    if job.outputdata.location:
        jspec.destinationSE = job.outputdata.location
    else:
        jspec.destinationSE = job.backend.site
    if job.inputdata:
        jspec.prodDBlock = job.inputdata.dataset[0]
    else:
        jspec.prodDBlock = 'NULL'
    if app.prod_source_label:
        jspec.prodSourceLabel = app.prod_source_label
    else:
        jspec.prodSourceLabel = configPanda['prodSourceLabelRun']
    jspec.processingType = configPanda['processingType']
    jspec.specialHandling = configPanda['specialHandling']
    jspec.computingSite = job.backend.site
    jspec.cloud = job.backend.requirements.cloud
    jspec.cmtConfig = app.atlas_cmtconfig
    if app.dbrelease == 'LATEST':
        try:
            latest_dbrelease = getLatestDBReleaseCaching()
        except:
            from pandatools import Client
            latest_dbrelease = Client.getLatestDBRelease()
        m = re.search('(.*):DBRelease-(.*)\.tar\.gz', latest_dbrelease)
        if m:
            self.dbrelease_dataset = m.group(1)
            self.dbrelease = m.group(2)
        else:
            raise ApplicationConfigurationError(None, "Error retrieving LATEST DBRelease. Try setting application.dbrelease manually.")
    else:
        self.dbrelease_dataset = app.dbrelease_dataset
        self.dbrelease = app.dbrelease
    jspec.jobParameters = app.job_parameters
    if self.dbrelease:
        if self.dbrelease == 'current':
            jspec.jobParameters += ' --DBRelease=current'
        else:
            if jspec.transformation.endswith("_tf.py") or jspec.transformation.endswith("_tf"):
                jspec.jobParameters += ' --DBRelease=DBRelease-%s.tar.gz' % (self.dbrelease,)
            else:
                jspec.jobParameters += ' DBRelease=DBRelease-%s.tar.gz' % (self.dbrelease,)
            dbspec = FileSpec()
            dbspec.lfn = 'DBRelease-%s.tar.gz' % self.dbrelease
            dbspec.dataset = self.dbrelease_dataset
            dbspec.prodDBlock = jspec.prodDBlock
            dbspec.type = 'input'
            jspec.addFile(dbspec)
# ... (remainder of this method omitted) ...
Example 13: run
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
# ... (earlier part of this method omitted) ...
            # update task
            self.taskBuffer.updateTaskModTimeJEDI(self.jediTaskID)
        else:
            # get candidates
            tmpRet,candidateMaps = self.pd2p.getCandidates(self.userDatasetName,checkUsedFile=False,
                                                           useHidden=True)
            if not tmpRet:
                self.endWithError('Failed to find candidate for destination')
                return False
            # collect all candidates
            allCandidates = []
            for tmpDS,tmpDsVal in candidateMaps.iteritems():
                for tmpCloud,tmpCloudVal in tmpDsVal.iteritems():
                    for tmpSiteName in tmpCloudVal[0]:
                        if not tmpSiteName in allCandidates:
                            allCandidates.append(tmpSiteName)
            if allCandidates == []:
                self.endWithError('No candidate for destination')
                return False
            # get list of dataset (container) names
            if eventPickNumSites > 1:
                # decompose container to transfer datasets separately
                tmpRet,tmpOut = self.pd2p.getListDatasetReplicasInContainer(self.userDatasetName)
                if not tmpRet:
                    self.endWithError('Failed to get the size of %s' % self.userDatasetName)
                    return False
                userDatasetNameList = tmpOut.keys()
            else:
                # transfer container at once
                userDatasetNameList = [self.userDatasetName]
            # loop over all datasets
            sitesUsed = []
            for tmpUserDatasetName in userDatasetNameList:
                # get size of dataset container
                tmpRet,totalInputSize = rucioAPI.getDatasetSize(tmpUserDatasetName)
                if not tmpRet:
                    self.endWithError('Failed to get the size of %s' % tmpUserDatasetName)
                    return False
                # run brokerage
                tmpJob = JobSpec()
                tmpJob.AtlasRelease = ''
                self.putLog("run brokerage for %s" % tmpDS)
                brokerage.broker.schedule([tmpJob],self.taskBuffer,self.siteMapper,True,allCandidates,
                                          True,datasetSize=totalInputSize)
                if tmpJob.computingSite.startswith('ERROR'):
                    self.endWithError('brokerage failed with %s' % tmpJob.computingSite)
                    return False
                self.putLog("site -> %s" % tmpJob.computingSite)
                # send transfer request
                try:
                    tmpDN = rucioAPI.parse_dn(tmpDN)
                    tmpStatus,userInfo = rucioAPI.finger(tmpDN)
                    if not tmpStatus:
                        raise RuntimeError,'user info not found for {0} with {1}'.format(tmpDN,userInfo)
                    tmpDN = userInfo['nickname']
                    tmpDQ2ID = self.siteMapper.getSite(tmpJob.computingSite).ddm_input
                    tmpMsg = "%s ds=%s site=%s id=%s" % ('registerDatasetLocation for DaTRI ',
                                                         tmpUserDatasetName,
                                                         tmpDQ2ID,
                                                         tmpDN)
                    self.putLog(tmpMsg)
                    rucioAPI.registerDatasetLocation(tmpDS,[tmpDQ2ID],lifetime=14,owner=tmpDN,
                                                     activity="User Subscriptions")
                    self.putLog('OK')
                except:
                    errType,errValue = sys.exc_info()[:2]
                    tmpStr = 'Failed to send transfer request : %s %s' % (errType,errValue)
                    tmpStr.strip()
                    tmpStr += traceback.format_exc()
                    self.endWithError(tmpStr)
                    return False
                # list of sites already used
                sitesUsed.append(tmpJob.computingSite)
                self.putLog("used %s sites" % len(sitesUsed))
                # set candidates
                if len(sitesUsed) >= eventPickNumSites:
                    # reset candidates to limit the number of sites
                    allCandidates = sitesUsed
                    sitesUsed = []
                else:
                    # remove site
                    allCandidates.remove(tmpJob.computingSite)
            # send email notification for success
            tmpMsg = 'A transfer request was successfully sent to Rucio.\n'
            tmpMsg += 'Your task will get started once transfer is completed.'
            self.sendEmail(True,tmpMsg)
        try:
            # unlock and delete evp file
            fcntl.flock(self.evpFile.fileno(),fcntl.LOCK_UN)
            self.evpFile.close()
            os.remove(self.evpFileName)
        except:
            pass
        # successfully terminated
        self.putLog("end %s" % self.evpFileName)
        return True
    except:
        errType,errValue = sys.exc_info()[:2]
        self.endWithError('Got exception %s:%s %s' % (errType,errValue,traceback.format_exc()))
        return False
Example 14: range
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
else:
    site = None
    cloud = None
datasetName = 'panda.destDB.%s_tid999991' % commands.getoutput('uuidgen')
taskid = 999989
jobList = []
for i in range(1):
    job = JobSpec()
    job.jobDefinitionID = int(time.time()) % 10000
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),i)
    # job.AtlasRelease = 'Atlas-12.0.6'
    # job.homepackage = 'AtlasProduction/12.0.6.5'
    job.AtlasRelease = 'Atlas-12.0.7'
    job.homepackage = 'AtlasProduction/12.0.7.1'
    job.transformation = 'csc_evgen_trf.py'
    job.destinationDBlock = datasetName
    # job.destinationSE = destName
    # job.cloud = 'CA'
    job.cloud = cloud
    job.taskID = taskid
    job.currentPriority = 1000
    job.prodSourceLabel = 'test'
    # job.prodSourceLabel = 'cloudtest'
    job.computingSite = site
    file = FileSpec()
    file.lfn = "%s.evgen.pool.root" % job.jobName
Example 15: len
# Required import: from taskbuffer.JobSpec import JobSpec [as alias]
# Or: from taskbuffer.JobSpec.JobSpec import AtlasRelease [as alias]
import userinterface.Client as Client
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
if len(sys.argv)>2:
    site = sys.argv[1]
    cloud = sys.argv[2]
else:
    site = "UTA_PAUL_TEST"
    cloud = "US"
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_1" % commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-20.1.4'
job.homepackage = 'AtlasProduction/20.1.4.14'
#job.AtlasRelease = 'Atlas-20.20.8'
#job.homepackage = 'AtlasProduction/20.20.8.4'
job.transformation = 'Reco_tf.py'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('uuidgen')
job.destinationSE = 'AGLT2_TEST'
job.prodDBlock = 'user.mlassnig:user.mlassnig.pilot.test.single.hits'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'
job.prodSourceLabel = 'user'
job.computingSite = site
job.cloud = cloud
job.cmtConfig = 'x86_64-slc6-gcc48-opt'
job.specialHandling = 'ddm:rucio'
#job.transferType = 'direct'