This page collects representative code examples for the Python method Ganga.GPIDev.Lib.Job.Job.splitter. If you are unsure what Job.splitter does or how to use it, the curated examples below may help; you can also look at the containing class, Ganga.GPIDev.Lib.Job.Job, for more context.
Eight code examples of Job.splitter are shown below, sorted by popularity.
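For orientation before the examples: Job.splitter holds the splitter object that turns a master job into subjobs at submission time. Below is a minimal GPI-style sketch of the pattern, assuming an interactive Ganga session where Job, Executable and ArgSplitter (all seen in Example 8 below) are available without imports; the submit() step is assumed Ganga behaviour, not shown on this page:

j = Job()
j.application = Executable()        # Ganga's default application
j.splitter = ArgSplitter()          # attach the splitter to the master job
j.splitter.args = [['a'], ['b']]    # one subjob per argument list
j.submit()                          # submission invokes the splitter to create the subjobs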
Example 1: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    datatmp = []
    logger.debug("dataset size: %s" % str(len(dataset)))
    # logger.debug("dataset: %s" % str(dataset))
    if isinstance(dataset, LHCbDataset):
        for i in dataset:
            if isType(i, DiracFile):
                datatmp.append(i)
            else:
                logger.error("Unknown file-type %s, cannot perform split with file %s"
                             % (type(i), str(i)))
                from Ganga.Core.exceptions import GangaException
                raise GangaException("Unknown file-type %s, cannot perform split with file %s"
                                     % (type(i), str(i)))
    elif isinstance(dataset, list) or isType(dataset, GangaList):
        for this_file in dataset:
            if isinstance(this_file, str):
                datatmp.append(allComponentFilters['gangafiles'](this_file, None))
            elif isType(this_file, IGangaFile):
                datatmp.append(this_file)
            else:
                logger.error("Unexpected type: %s" % str(type(this_file)))
                logger.error("Wanted object to inherit from type: %s" % str(IGangaFile))
                from Ganga.Core.exceptions import GangaException
                raise GangaException("Unknown (unexpected) file object: %s" % this_file)
    elif isinstance(dataset, str):
        datatmp.append(DiracFile(lfn=dataset))
    else:
        logger.error("Unknown dataset type, cannot perform split here")
        logger.error("Dataset found: " + str(dataset))
        from Ganga.Core.exceptions import GangaException
        raise GangaException("Unknown dataset type, cannot perform split here")

    logger.debug("Creating new Job in Splitter")
    j = Job()
    logger.debug("Copying from master job")
    # Copy everything except the fields the splitter sets itself.
    j.copyFrom(stripProxy(job), ['inputdata', 'inputsandbox', 'inputfiles'])
    logger.debug("Unsetting splitter")
    j.splitter = None
    logger.debug("Unsetting merger")
    j.merger = None
    # j.inputsandbox = []  # master sandbox is added automatically
    # j.inputfiles = []
    logger.debug("Setting inputdata")
    j.inputdata = LHCbDataset(files=datatmp[:],
                              persistency=self.persistency,
                              depth=self.depth)
    # j.inputdata.XMLCatalogueSlice = self.XMLCatalogueSlice
    logger.debug("Returning new subjob")
    return j
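A helper like _create_subjob is normally driven from the splitter's split() method. The sketch below is illustrative only: the chunking loop and the filesPerJob attribute are assumptions about how a file-based splitter might call it, not the actual SplitByFiles implementation:

def split(self, job):
    # Hypothetical driver: one subjob per chunk of the master job's files.
    subjobs = []
    files = list(job.inputdata)
    chunk_size = self.filesPerJob  # assumed schema attribute
    for start in range(0, len(files), chunk_size):
        subjobs.append(self._create_subjob(job, files[start:start + chunk_size]))
    return subjobs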
Example 2: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def _create_subjob(self, job, inputdata):
    j = Job()
    j.copyFrom(job)
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master sandbox is added automatically
    j.inputfiles = []
    j.inputdata = inputdata
    return j
Example 3: createSubjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def createSubjob(self, job):
    """ Create a new subjob by copying the master job and setting all fields correctly.
    """
    from Ganga.GPIDev.Lib.Job import Job
    j = Job()
    j.copyFrom(job)
    j.splitter = None
    # FIXME:
    j.inputsandbox = []
    return j
Example 4: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    j = Job()
    j.copyFrom(job)
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master sandbox is added automatically
    j.inputdata = GaudiDataset(files=dataset)
    # if not j.inputdata: j.inputdata = GaudiDataset(files=dataset)
    # else: j.inputdata.files = dataset
    return j
Example 5: split
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def split(self, job):
    subjobs = []
    for i in range(self.numJobs):
        j = Job()
        j.copyFrom(job)
        j.splitter = None
        j.merger = None
        j.inputsandbox = []  # master sandbox is added automatically
        subjobs.append(j)
    return subjobs
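For context, a split() method like the one above lives on a splitter class, and assigning an instance to Job.splitter is what makes Ganga call it. Here is a hedged sketch of the surrounding class, assuming Ganga's usual ISplitter/Schema plumbing; the CopySplitter name is hypothetical:

from Ganga.GPIDev.Adapters.ISplitter import ISplitter
from Ganga.GPIDev.Lib.Job import Job
from Ganga.GPIDev.Schema import Schema, Version, SimpleItem

class CopySplitter(ISplitter):
    """Hypothetical splitter: clones the master job numJobs times."""
    _name = 'CopySplitter'
    _schema = Schema(Version(1, 0), {
        'numJobs': SimpleItem(defvalue=2, doc='number of identical subjobs'),
    })

    def split(self, job):
        # Same pattern as Example 5 above.
        subjobs = []
        for _ in range(self.numJobs):
            j = Job()
            j.copyFrom(job)
            j.splitter = None
            j.merger = None
            subjobs.append(j)
        return subjobs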
Example 6: createSubjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def createSubjob(self, job, additional_skip_args=None):
    """ Create a new subjob by copying the master job and setting all fields correctly.
    """
    from Ganga.GPIDev.Lib.Job import Job
    if additional_skip_args is None:
        additional_skip_args = []

    j = Job()
    skipping_args = ['splitter', 'inputsandbox', 'inputfiles', 'inputdata', 'subjobs']
    for arg in additional_skip_args:
        skipping_args.append(arg)
    j.copyFrom(job, skipping_args)
    j.splitter = None
    j.inputsandbox = []
    j.inputfiles = []
    j.inputdata = []
    return j
Example 7: _create_subjob
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def _create_subjob(self, job, dataset):
    logger.debug("_create_subjob")
    datatmp = []
    if isinstance(dataset, LHCbDataset):
        for i in dataset:
            if isinstance(i, DiracFile):
                datatmp.append(i)
            else:
                logger.error("Unknown file-type %s, cannot perform split with file %s"
                             % (type(i), str(i)))
                from Ganga.Core.exceptions import GangaException
                raise GangaException("Unknown file-type %s, cannot perform split with file %s"
                                     % (type(i), str(i)))
    elif isinstance(dataset, list):
        from Ganga.GPIDev.Base.Proxy import isType
        from Ganga.Core.exceptions import GangaException
        for i in dataset:
            if type(i) is str:
                datatmp.append(DiracFile(lfn=i))
            elif isType(i, DiracFile):
                datatmp.append(i)
            else:
                raise GangaException("Unknown (unexpected) file object: %s" % i)
    else:
        logger.error("Unknown dataset type, cannot perform split here")
        from Ganga.Core.exceptions import GangaException
        raise GangaException("Unknown dataset type, cannot perform split here")

    logger.debug("Creating new Job in Splitter")
    j = Job()
    j.copyFrom(stripProxy(job))
    j.splitter = None
    j.merger = None
    j.inputsandbox = []  # master sandbox is added automatically
    j.inputfiles = []
    j.inputdata = LHCbDataset(files=datatmp[:],
                              persistency=self.persistency,
                              depth=self.depth)
    j.inputdata.XMLCatalogueSlice = self.XMLCatalogueSlice
    return j
Example 8: test__setup_bulk_subjobs
# Required import: from Ganga.GPIDev.Lib.Job import Job [as alias]
# Or: from Ganga.GPIDev.Lib.Job.Job import splitter [as alias]
def test__setup_bulk_subjobs(tmpdir, db):
    from Ganga.Core import BackendError
    from Ganga.GPIDev.Lib.Dataset.Dataset import Dataset
    from GangaDirac.Lib.Backends import Dirac

    name = str(tmpdir.join('submit_script'))
    with open(name, 'w') as fd:
        fd.write(script_template.replace('###PARAMETRIC_INPUTDATA###', str([['a'], ['b']])))

    # An empty list of DIRAC ids must be rejected.
    with pytest.raises(BackendError):
        db._setup_bulk_subjobs([], name)

    d = Dirac()
    j = Job()
    j.id = 0  # This would normally be set by the registry if this was a proxy job
    j.application = Executable()
    j.splitter = ArgSplitter()
    j.splitter.args = [['a'], ['b'], ['c'], ['d'], ['e']]
    j.inputdata = Dataset()
    j.backend = d
    d._parent = j
    dirac_ids = [123, 456]

    def fake_setup_subjob_dataset(dataset):
        assert dataset in [['a'], ['b']], 'dataset not passed properly'

    with patch.object(d, '_setup_subjob_dataset', fake_setup_subjob_dataset):
        assert d._setup_bulk_subjobs(dirac_ids, name), "_setup_bulk_subjobs didn't run"

    assert len(j.subjobs) == len(dirac_ids), 'wrong number of subjobs'
    for id_, backend_id, subjob in zip(range(len(dirac_ids)), dirac_ids, j.subjobs):
        assert id_ == subjob.id, 'ids do not match'
        assert backend_id == subjob.backend.id, 'backend ids do not match'
        assert isinstance(subjob.application, j.application.__class__), 'applications do not match'
        assert subjob.splitter is None, 'splitter not removed from subjob'
        assert isinstance(subjob.backend, j.backend.__class__), 'backends do not match'
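The test above is excerpted from a larger module. A hedged reconstruction of the module-level names it relies on, inferred from usage rather than shown on this page (db and tmpdir are pytest fixtures, and script_template is a string defined elsewhere in the same module):

import pytest
from mock import patch  # unittest.mock's patch on Python 3
from Ganga.GPIDev.Lib.Job import Job
from Ganga.Lib.Executable import Executable
from Ganga.Lib.Splitters import ArgSplitter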