This page collects typical usage examples of the Python method dataset.Dataset.predefined. If you are wondering what Dataset.predefined does, how to use it, or what real-world calls to it look like, the curated code samples below may help. You can also explore further usage examples of the class dataset.Dataset that the method belongs to.

The following shows 2 code examples of Dataset.predefined, sorted by popularity by default. You can upvote the examples you find helpful; your votes help the system recommend better Python code samples.
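Before the full examples, here is a minimal sketch of the calling pattern they both share: construct a Dataset (optionally asking it to try a predefined definition first) and then branch on Dataset.predefined(). The dataset name and keyword values are hypothetical placeholders; the constructor arguments and methods used are only those that appear in the excerpts below.

# Minimal sketch (hypothetical values; API inferred from the examples below).
from dataset import Dataset

dataset = Dataset(
    "/MinimumBias/Run2012A-v1/RAW",    # hypothetical dataset name
    tryPredefinedFirst = True,         # look for a predefined definition first
    cmssw = "path/to/CMSSW_area",      # hypothetical CMSSW working area
    cmsswrelease = "path/to/release")  # hypothetical release base

if dataset.predefined():
    # a predefined definition exists for this dataset
    print("Using predefined dataset:", dataset.name())
else:
    print("No predefined definition found for '%s'." % dataset.name())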
Example 1: __init__
# Required import: from dataset import Dataset [as alias]
# Or: from dataset.Dataset import predefined [as alias]
def __init__(self, valName, alignment, config):
"""
This method adds additional items to the `self.general` dictionary
which are only needed for validations using datasets.
Arguments:
- `valName`: String which identifies individual validation instances
- `alignment`: `Alignment` instance to validate
- `config`: `BetterConfigParser` instance which includes the
configuration of the validations
"""
super(GenericValidationData, self).__init__(valName, alignment, config)
# if maxevents is not specified, cannot calculate number of events for
# each parallel job, and therefore running only a single job
if int( self.general["maxevents"] ) < 0 and self.NJobs > 1:
msg = ("Maximum number of events (maxevents) not specified: "
"cannot use parallel jobs.")
raise AllInOneError(msg)
if int( self.general["maxevents"] ) / self.NJobs != float( self.general["maxevents"] ) / self.NJobs:
msg = ("maxevents has to be divisible by parallelJobs")
raise AllInOneError(msg)
tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == ""
and self.general["firstRun"] == "" and self.general["lastRun"] == ""
and self.general["begin"] == "" and self.general["end"] == "")
if self.general["dataset"] not in globalDictionaries.usedDatasets:
globalDictionaries.usedDatasets[self.general["dataset"]] = {}
if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]:
if globalDictionaries.usedDatasets[self.general["dataset"]] != {}:
print ("Warning: you use the same dataset '%s' in multiple cmssw releases.\n"
"This is allowed, but make sure it's not a mistake") % self.general["dataset"]
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None}
Bfield = self.general.get("magneticfield", None)
if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None:
dataset = Dataset(
self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst,
cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase, magneticfield = Bfield,
dasinstance = self.general["dasinstance"])
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset
if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset
self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst]
self.general["magneticField"] = self.dataset.magneticField()
self.general["defaultMagneticField"] = "MagneticField"
if self.general["magneticField"] == "unknown":
print "Could not get the magnetic field for this dataset."
print "Using the default: ", self.general["defaultMagneticField"]
self.general["magneticField"] = '.oO[defaultMagneticField]Oo.'
if not self.jobmode.split( ',' )[0] == "crab":
try:
self.general["datasetDefinition"] = self.dataset.datasetSnippet(
jsonPath = self.general["JSON"],
firstRun = self.general["firstRun"],
lastRun = self.general["lastRun"],
begin = self.general["begin"],
end = self.general["end"],
parent = self.needParentFiles )
except AllInOneError as e:
msg = "In section [%s:%s]: "%(self.valType, self.name)
msg += str(e)
raise AllInOneError(msg)
else:
if self.dataset.predefined():
msg = ("For jobmode 'crab' you cannot use predefined datasets "
"(in your case: '%s')."%( self.dataset.name() ))
raise AllInOneError( msg )
try:
theUpdate = config.getResultingSection(self.valType+":"+self.name,
demandPars = ["parallelJobs"])
except AllInOneError as e:
msg = str(e)[:-1]+" when using 'jobmode: crab'."
raise AllInOneError(msg)
self.general.update(theUpdate)
if self.general["begin"] or self.general["end"]:
( self.general["begin"],
self.general["end"],
self.general["firstRun"],
self.general["lastRun"] ) = self.dataset.convertTimeToRun(
firstRun = self.general["firstRun"],
lastRun = self.general["lastRun"],
begin = self.general["begin"],
end = self.general["end"],
shortTuple = False)
if self.general["begin"] == None:
self.general["begin"] = ""
if self.general["end"] == None:
self.general["end"] = ""
self.general["firstRun"] = str( self.general["firstRun"] )
self.general["lastRun"] = str( self.general["lastRun"] )
if ( not self.general["firstRun"] ) and \
( self.general["end"] or self.general["lastRun"] ):
self.general["firstRun"] = str(
self.dataset.runList()[0]["run_number"])
#......... rest of the code omitted here .........
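Example 1 caches every Dataset it builds in the nested globalDictionaries.usedDatasets dictionary, keyed by dataset name, then CMSSW area, then the tryPredefinedFirst flag, so the same dataset is resolved at most once per configuration. The stand-alone sketch below reproduces only that bookkeeping; usedDatasets, get_dataset and make_dataset here are illustrative stand-ins, not part of the CMSSW code.

# Simplified, self-contained sketch of the caching used in example 1.
usedDatasets = {}   # {dataset name: {cmssw area: {tryPredefinedFirst: Dataset or None}}}

def get_dataset(name, cmssw, tryPredefinedFirst, make_dataset):
    """Return a cached Dataset, creating and storing it on first use."""
    byRelease = usedDatasets.setdefault(name, {})
    slots = byRelease.setdefault(cmssw, {False: None, True: None})
    if slots[tryPredefinedFirst] is None:
        dataset = make_dataset(name, tryPredefinedFirst)
        slots[tryPredefinedFirst] = dataset
        # if no predefined definition was found, the result equals a direct
        # lookup, so fill the other slot as well (as example 1 does)
        if tryPredefinedFirst and not dataset.predefined():
            slots[False] = dataset
    return slots[tryPredefinedFirst]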
Example 2: __init__
# Required import: from dataset import Dataset [as alias]
# Or: from dataset.Dataset import predefined [as alias]
def __init__(self, valName, alignment, config, valType,
addDefaults = {}, addMandatories=[]):
"""
This method adds additional items to the `self.general` dictionary
which are only needed for validations using datasets.
Arguments:
- `valName`: String which identifies individual validation instances
- `alignment`: `Alignment` instance to validate
- `config`: `BetterConfigParser` instance which includes the
configuration of the validations
- `valType`: String which specifies the type of validation
- `addDefaults`: Dictionary which contains default values for individual
validations in addition to the general default values
- `addMandatories`: List which contains mandatory parameters for
individual validations in addition to the general
mandatory parameters
"""
defaults = {"runRange": "",
"firstRun": "",
"lastRun": "",
"begin": "",
"end": "",
"JSON": ""
}
defaults.update(addDefaults)
mandatories = [ "dataset", "maxevents" ]
mandatories += addMandatories
GenericValidation.__init__(self, valName, alignment, config, valType, defaults, mandatories)
# if maxevents is not specified, cannot calculate number of events for
# each parallel job, and therefore running only a single job
if int( self.general["maxevents"] ) == -1 and self.NJobs > 1:
msg = ("Maximum number of events (maxevents) not specified: "
"cannot use parallel jobs.")
raise AllInOneError(msg)
tryPredefinedFirst = (not self.jobmode.split( ',' )[0] == "crab" and self.general["JSON"] == ""
and self.general["firstRun"] == "" and self.general["lastRun"] == ""
and self.general["begin"] == "" and self.general["end"] == "")
if self.general["dataset"] not in globalDictionaries.usedDatasets:
globalDictionaries.usedDatasets[self.general["dataset"]] = {}
if self.cmssw not in globalDictionaries.usedDatasets[self.general["dataset"]]:
if globalDictionaries.usedDatasets[self.general["dataset"]] != {}:
print ("Warning: you use the same dataset '%s' in multiple cmssw releases.\n"
"This is allowed, but make sure it's not a mistake") % self.general["dataset"]
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw] = {False: None, True: None}
if globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] is None:
dataset = Dataset(
self.general["dataset"], tryPredefinedFirst = tryPredefinedFirst,
cmssw = self.cmssw, cmsswrelease = self.cmsswreleasebase )
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst] = dataset
if tryPredefinedFirst and not dataset.predefined(): #No point finding the data twice in that case
globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][False] = dataset
self.dataset = globalDictionaries.usedDatasets[self.general["dataset"]][self.cmssw][tryPredefinedFirst]
self.general["magneticField"] = self.dataset.magneticField()
self.general["defaultMagneticField"] = "38T"
if self.general["magneticField"] == "unknown":
print "Could not get the magnetic field for this dataset."
print "Using the default: ", self.general["defaultMagneticField"]
self.general["magneticField"] = '.oO[defaultMagneticField]Oo.'
if not self.jobmode.split( ',' )[0] == "crab":
try:
self.general["datasetDefinition"] = self.dataset.datasetSnippet(
jsonPath = self.general["JSON"],
firstRun = self.general["firstRun"],
lastRun = self.general["lastRun"],
begin = self.general["begin"],
end = self.general["end"],
parent = self.needParentFiles )
        except AllInOneError as e:
msg = "In section [%s:%s]: "%(valType, self.name)
msg += str(e)
raise AllInOneError(msg)
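Both examples only set tryPredefinedFirst when the job mode is not 'crab' and no JSON, run-range, or time-range restrictions are given, and example 1 additionally rejects predefined datasets outright under crab. A compact, hypothetical restatement of that decision logic (using RuntimeError in place of AllInOneError) could look like this:

# Hypothetical restatement of the tryPredefinedFirst / crab checks shown above.
def may_try_predefined(jobmode, general):
    """True only for non-crab jobs with no JSON, run-range or time-range cuts."""
    return (jobmode.split(',')[0] != "crab"
            and general["JSON"] == ""
            and general["firstRun"] == "" and general["lastRun"] == ""
            and general["begin"] == "" and general["end"] == "")

def check_crab_compatible(jobmode, dataset):
    """Crab jobs cannot run on predefined datasets (see example 1)."""
    if jobmode.split(',')[0] == "crab" and dataset.predefined():
        raise RuntimeError("For jobmode 'crab' you cannot use predefined datasets "
                           "(in your case: '%s')." % dataset.name())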