This article collects typical usage examples of the inputdata attribute of the Python class Ganga.GPIDev.Lib.Job.Job. If you are wondering what Job.inputdata does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore the containing class Ganga.GPIDev.Lib.Job.Job in more depth.
Below are 10 code examples of Job.inputdata, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
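Before the examples, here is a minimal sketch of the pattern they all build on: constructing a Job and assigning (or clearing) its inputdata attribute. This sketch is illustrative only; the dataset type you assign in practice depends on your Ganga plugin (LHCbDataset, BesDataset, and plain Dataset all appear below).

from Ganga.GPIDev.Lib.Job import Job

j = Job()
j.name = 'example'   # optional descriptive name
j.inputdata = None   # cleared: this job reads no input dataset
# In a real session you would assign a dataset object instead, e.g.
# j.inputdata = LHCbDataset(files=[...]), as the examples below show.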
Example 1: create_gaudi_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def create_gaudi_subjob(job, inputdata):
    j = Job()
    j.name = job.name
    j.application = copy_app(job.application)
    j.backend = job.backend  # no need to deepcopy
    if inputdata:
        j.inputdata = inputdata
        if hasattr(j.application, 'extra'):
            j.application.extra.inputdata = j.inputdata
    else:
        j.inputdata = None
        if hasattr(j.application, 'extra'):
            j.application.extra.inputdata = BesDataset()
    j.outputsandbox = job.outputsandbox[:]
    j.outputdata = job.outputdata
    return j
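A hypothetical invocation of this helper could look as follows; master_job and dataset_slices are assumed names for illustration and do not appear in the example above.

# Hypothetical usage sketch: one subjob per dataset slice.
subjobs = [create_gaudi_subjob(master_job, ds) for ds in dataset_slices]
no_input = create_gaudi_subjob(master_job, None)  # falls back to an empty BesDataset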
Example 2: __make_subjob__
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def __make_subjob__(self, mj, guids, names, sjob_evnts=-1, sites=None):
    """
    private method to create subjob object
    """
    logger.debug('generating subjob to run %d events in-total on files: %s' % (sjob_evnts, repr(guids)))
    j = Job()
    j.name = mj.name
    j.inputdata = mj.inputdata
    if j.inputdata.type in ['', 'DQ2']:
        j.inputdata.guids = guids
        j.inputdata.names = names
    j.outputdata = mj.outputdata
    j.application = mj.application
    if sjob_evnts != -1:
        j.application.max_events = sjob_evnts
    j.backend = mj.backend
    if j.backend._name in ['LCG'] and j.backend.requirements._name == 'AtlasLCGRequirements':
        if sites:
            j.backend.requirements.sites = sites
    j.inputsandbox = mj.inputsandbox
    j.outputsandbox = mj.outputsandbox
    return j
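Being a private helper, this is called from the splitter's own split() implementation; a hedged sketch of such a call site follows (the GUIDs, file names, and site list are made-up values):

# Hedged sketch: calling the helper from inside a split() method.
sub = self.__make_subjob__(mj=master_job,
                           guids=['guid-a', 'guid-b'],    # made-up GUIDs
                           names=['f1.root', 'f2.root'],  # made-up file names
                           sjob_evnts=1000,
                           sites=['SOME_SITE'])           # only used for LCG + AtlasLCGRequirements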
Example 3: split
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def split(self, job):
    from Ganga.GPIDev.Lib.Job import Job
    logger.debug("AnaTaskSplitterJob split called")
    sjl = []
    transform = stripProxy(job.application.getTransform())
    transform.setAppStatus(job.application, "removed")
    # Do the splitting
    for sj in self.subjobs:
        j = Job()
        j.inputdata = transform.partitions_data[sj-1]
        j.outputdata = job.outputdata
        j.application = job.application
        j.application.atlas_environment.append("OUTPUT_FILE_NUMBER=%i" % sj)
        j.backend = job.backend
        if transform.partitions_sites:
            if hasattr(j.backend.requirements, 'sites'):
                j.backend.requirements.sites = transform.partitions_sites[sj-1]
            else:
                j.backend.site = transform.partitions_sites[sj-1]
        j.inputsandbox = job.inputsandbox
        j.outputsandbox = job.outputsandbox
        sjl.append(j)
        # Task handling
        j.application.tasks_id = job.application.tasks_id
        j.application.id = transform.getNewAppID(sj)
        #transform.setAppStatus(j.application, "submitting")
    if not job.application.tasks_id.startswith("00"):
        job.application.tasks_id = "00:%s" % job.application.tasks_id
    return sjl
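The splitter is normally attached to a job rather than called directly; a hedged sketch of that wiring, with the class name taken from the debug message above:

# Hedged sketch: let Ganga invoke split() at submission time.
s = AnaTaskSplitterJob()
s.subjobs = [1, 2, 3]  # partition numbers to materialise as subjobs
job.splitter = s
job.submit()           # Ganga calls s.split(job) internally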
Example 4: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    datatmp = []
    logger.debug("dataset size: %s" % str(len(dataset)))
    #logger.debug("dataset: %s" % str(dataset))
    if isinstance(dataset, LHCbDataset):
        for i in dataset:
            if isType(i, DiracFile):
                datatmp.append(i)
            else:
                logger.error(
                    "Unknown file-type %s, cannot perform split with file %s" % (type(i), str(i)))
                from Ganga.Core.exceptions import GangaException
                raise GangaException(
                    "Unknown file-type %s, cannot perform split with file %s" % (type(i), str(i)))
    elif type(dataset) == type([]) or isType(dataset, GangaList):
        for file in dataset:
            if type(file) == type(''):
                datatmp.append(
                    allComponentFilters['gangafiles'](file, None))
            elif isType(file, IGangaFile):
                datatmp.append(file)
            else:
                logger.error("Unexpected type: %s" % str(type(file)))
                logger.error(
                    "Wanted object to inherit from type: %s" % str(IGangaFile))
                from Ganga.Core.exceptions import GangaException
                raise GangaException(
                    "Unknown (unexpected) file object: %s" % file)
    elif type(dataset) == type(''):
        datatmp.append(DiracFile(lfn=dataset))
    else:
        logger.error("Unknown dataset type, cannot perform split here")
        from Ganga.Core.exceptions import GangaException
        logger.error("Dataset found: " + str(dataset))
        raise GangaException(
            "Unknown dataset type, cannot perform split here")
    logger.debug("Creating new Job in Splitter")
    j = Job()
    logger.debug("Copying From Job")
    j.copyFrom(
        stripProxy(job), ['inputdata', 'inputsandbox', 'inputfiles'])
    logger.debug("Unsetting Splitter")
    j.splitter = None
    logger.debug("Unsetting Merger")
    j.merger = None
    # j.inputsandbox = []  ## master added automatically
    #j.inputfiles = []
    logger.debug("Setting InputData")
    j.inputdata = LHCbDataset(files=datatmp[:],
                              persistency=self.persistency,
                              depth=self.depth)
    #j.inputdata.XMLCatalogueSlice = self.XMLCatalogueSlice
    logger.debug("Returning new subjob")
    return j
Example 5: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def _create_subjob(self, job, inputdata):
    j = Job()
    j.copyFrom(job)
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master added automatically
    j.inputfiles = []
    j.inputdata = inputdata
    return j
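This helper is typically driven from the splitter's split() method, one call per chunk of the input dataset; a minimal sketch under that assumption (the chunking helper is hypothetical):

# Hedged sketch of the surrounding split() loop.
def split(self, job):
    subjobs = []
    for chunk in self._chunk_dataset(job.inputdata):  # hypothetical chunking helper
        subjobs.append(self._create_subjob(job, chunk))
    return subjobs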
Example 6: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    j = Job()
    j.copyFrom(job)
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master added automatically
    j.inputdata = GaudiDataset(files=dataset)
    ## if not j.inputdata: j.inputdata = GaudiDataset(files=dataset)
    ## else: j.inputdata.files = dataset
    return j
Example 7: createSubjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def createSubjob(self, job, additional_skip_args=None):
    """ Create a new subjob by copying the master job and setting all fields correctly.
    """
    from Ganga.GPIDev.Lib.Job import Job
    if additional_skip_args is None:
        additional_skip_args = []
    j = Job()
    skipping_args = ['splitter', 'inputsandbox', 'inputfiles', 'inputdata', 'subjobs']
    for arg in additional_skip_args:
        skipping_args.append(arg)
    j.copyFrom(job, skipping_args)
    j.splitter = None
    j.inputsandbox = []
    j.inputfiles = []
    j.inputdata = []
    return j
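Callers can extend the skip list so that further fields are not copied from the master; a hypothetical call:

# Hypothetical usage: also skip copying the 'merger' field.
sub = self.createSubjob(master_job, additional_skip_args=['merger'])
sub.inputdata = chunk  # assumed: this subjob's own slice of the input data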
Example 8: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    datatmp = []
    if isinstance(dataset, LHCbDataset):
        for i in dataset:
            if isinstance(i, DiracFile):
                datatmp.append(i)
            else:
                logger.error(
                    "Unknown file-type %s, cannot perform split with file %s" % (type(i), str(i)))
                from Ganga.Core.exceptions import GangaException
                raise GangaException(
                    "Unknown file-type %s, cannot perform split with file %s" % (type(i), str(i)))
    elif isinstance(dataset, list):
        from Ganga.GPIDev.Base.Proxy import isType
        from Ganga.Core.exceptions import GangaException
        for i in dataset:
            if type(i) is str:
                datatmp.append(DiracFile(lfn=i))
            elif isType(i, DiracFile):
                datatmp.append(i)
            else:
                raise GangaException(
                    "Unknown (unexpected) file object: %s" % i)
    else:
        logger.error("Unknown dataset type, cannot perform split here")
        from Ganga.Core.exceptions import GangaException
        raise GangaException(
            "Unknown dataset type, cannot perform split here")
    logger.debug("Creating new Job in Splitter")
    j = Job()
    j.copyFrom(stripProxy(job))
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master added automatically
    j.inputfiles = []
    j.inputdata = LHCbDataset(files=datatmp[:],
                              persistency=self.persistency,
                              depth=self.depth)
    j.inputdata.XMLCatalogueSlice = self.XMLCatalogueSlice
    return j
Example 9: split
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def split(self, job):
    from Ganga.GPIDev.Lib.Job import Job
    subjobs = []
    primeTables = job.inputdata.get_dataset()
    ## avoid creating jobs with nothing to do
    if self.numsubjobs > len(primeTables):
        self.numsubjobs = len(primeTables)
    ## create subjobs
    for i in range(self.numsubjobs):
        j = Job()
        j.application = job.application
        j.inputdata = job.inputdata
        j.inputdata.table_id_lower = 1
        j.inputdata.table_id_upper = 1
        j.outputdata = job.outputdata
        j.inputsandbox = job.inputsandbox
        j.outputsandbox = job.outputsandbox
        j.backend = job.backend
        subjobs.append(j)
    ## chunksize of each subjob (integer division; the remainder is spread below)
    chunksize = len(primeTables) // self.numsubjobs
    offset = 0
    for i in range(len(subjobs)):
        my_chunksize = chunksize
        if len(primeTables) % self.numsubjobs >= i + 1:
            my_chunksize += 1
        ## set lower bound id (inclusive)
        subjobs[i].inputdata.table_id_lower = offset + 1
        ## fill subjob with prime tables
        #for j in range(my_chunksize):
        #    subjobs[i].application.addPrimeTable(primeTables[offset+j])
        offset += my_chunksize
        ## set upper bound id (inclusive)
        subjobs[i].inputdata.table_id_upper = offset
    return subjobs
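A hedged usage sketch for this splitter; the class name is assumed for illustration since the example does not show it:

# Hedged sketch: split a prime-table job into 4 subjobs.
s = PrimeTableSplitter()  # hypothetical class name
s.numsubjobs = 4
job.splitter = s
job.submit()              # split() assigns table_id_lower/upper per subjob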
Example 10: test__setup_bulk_subjobs
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or alternatively: from Ganga.GPIDev.Lib.Job.Job import inputdata [as alias]
def test__setup_bulk_subjobs(tmpdir, db):
    from Ganga.Core import BackendError
    from Ganga.GPIDev.Lib.Dataset.Dataset import Dataset
    from GangaDirac.Lib.Backends import Dirac

    name = str(tmpdir.join('submit_script'))
    with open(name, 'w') as fd:
        fd.write(script_template.replace('###PARAMETRIC_INPUTDATA###', str([['a'], ['b']])))

    with pytest.raises(BackendError):
        db._setup_bulk_subjobs([], name)

    d = Dirac()
    j = Job()
    j.id = 0  # This would normally be set by the registry if this was a proxy job
    j.application = Executable()
    j.splitter = ArgSplitter()
    j.splitter.args = [['a'], ['b'], ['c'], ['d'], ['e']]
    j.inputdata = Dataset()
    j.backend = d
    d._parent = j
    dirac_ids = [123, 456]

    def fake_setup_subjob_dataset(dataset):
        assert dataset in [['a'], ['b']], 'dataset not passed properly'

    with patch.object(d, '_setup_subjob_dataset', fake_setup_subjob_dataset):
        assert d._setup_bulk_subjobs(dirac_ids, name), 'didnt run'

    assert len(j.subjobs) == len(dirac_ids), 'didnt work'
    for id_, backend_id, subjob in zip(range(len(dirac_ids)), dirac_ids, j.subjobs):
        assert id_ == subjob.id, 'ids dont match'
        assert backend_id == subjob.backend.id, 'backend.ids dont match'
        assert isinstance(subjob.application, j.application.__class__), 'apps dont match'
        assert subjob.splitter is None, 'splitter not done'
        assert isinstance(subjob.backend, j.backend.__class__), 'backend dont match'