This page collects typical usage examples of the Python method evaluator.Evaluator.calc_cls_values. If you are looking for details on what Evaluator.calc_cls_values does, how to call it, or real-world code that uses it, the curated examples below may help. You can also read more about its containing class, evaluator.Evaluator.
The following shows 1 code example of the Evaluator.calc_cls_values method. Examples are sorted by popularity by default; you can upvote the ones you find helpful, and your feedback helps the system recommend better Python code examples.
Example 1: evaluate
# Required import: from evaluator import Evaluator [as alias]
# Or: from evaluator.Evaluator import calc_cls_values [as alias]
def evaluate(self):
    """ Performs statistical evaluation of the result """
    AdvPrint.cout("Evaluating Results")
    resultCollectors = self.get_resultCollectors()

    # evaluate all results
    evaluators = dict()
    for analysis in resultCollectors:
        evaluators[analysis] = dict()

    # only process those results and those signal regions that are given in the reference file
    for analysis in Info.analyses:
        signal_regions = Info.get_analysis_parameters(analysis)["signal_regions"]
        for sr in signal_regions:
            evaluator = Evaluator(resultCollectors[analysis][sr])
            # Calculate everything that should be calculated
            # TODO: Beware analyses with unknown background
            evaluator.calc_efficiencies()
            evaluator.calc_r_values()
            if Info.flags["likelihood"]:
                evaluator.calc_likelihood()
            if Info.flags["fullcls"]:
                evaluator.calc_cls_values()
            if Info.flags["zsig"]:
                evaluator.calc_zsig()
            evaluators[analysis][sr] = evaluator

    if Info.parameters["bestcls"] != 0:
        AdvPrint.cout("Calculating CLs for the "+str(Info.parameters["bestcls"])+" most sensitive signal regions!")
        best_evaluators = find_strongest_evaluators(evaluators, Info.parameters["bestcls"])
        # if "bestcls" is 1, find_strongest_evaluators does not return a list but just the single best evaluator
        if Info.parameters["bestcls"] == 1:
            best_evaluators = [best_evaluators]
        for ev in best_evaluators:
            ev.calc_cls_values()

    # find the best result
    best_evaluator_per_analysis = dict()
    for analysis in evaluators:
        # Find the best of all SRs in this analysis
        best_evaluator_per_analysis[analysis] = find_strongest_evaluators(evaluators[analysis], 1)
    best_evaluator = find_strongest_evaluators(best_evaluator_per_analysis, 1)

    AdvPrint.set_cout_file(Info.files['output_totalresults'], True)
    AdvPrint.mute()
    for col in Info.parameters["TotalEvaluationFileColumns"]:
        AdvPrint.cout(col+" ", "nlb")
    AdvPrint.cout("")
    for a in sorted(evaluators.keys()):
        for sr in sorted(evaluators[a].keys()):
            AdvPrint.cout(evaluators[a][sr].line_from_data(Info.parameters["TotalEvaluationFileColumns"]))
    AdvPrint.format_columnated_file(Info.files['output_totalresults'])

    AdvPrint.set_cout_file(Info.files['output_bestsignalregions'], True)
    AdvPrint.mute()
    for col in Info.parameters["BestPerAnalysisEvaluationFileColumns"]:
        AdvPrint.cout(col+" ", "nlb")
    AdvPrint.cout("")
    # print analyses in alphabetical order
    for a in sorted(best_evaluator_per_analysis.keys()):
        AdvPrint.cout(best_evaluator_per_analysis[a].line_from_data(Info.parameters["BestPerAnalysisEvaluationFileColumns"]))
    AdvPrint.format_columnated_file(Info.files['output_bestsignalregions'])
    AdvPrint.set_cout_file("#None")
    AdvPrint.unmute()

    best_evaluator.check_warnings()
    best_evaluator.print_result()

    if Info.flags['zsig']:
        _print_zsig(evaluators)
    if Info.flags['likelihood']:
        _print_likelihood(evaluators)
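If you only need the CLs computation itself, the following is a minimal sketch distilled from the example above. It assumes the same framework-specific setup (an Info configuration object and a resultCollectors dictionary keyed by analysis and signal region); the analysis and signal-region names are placeholders, and only methods that appear in the example (calc_efficiencies, calc_r_values, calc_cls_values) are used.

from evaluator import Evaluator

# Hypothetical keys: replace "some_analysis" / "SR1" with entries from your own result collection.
collector = resultCollectors["some_analysis"]["SR1"]
evaluator = Evaluator(collector)

# As in the full example, efficiencies and r-values are computed first.
evaluator.calc_efficiencies()
evaluator.calc_r_values()

# Full CLs evaluation for this signal region (in the example this runs only when the "fullcls" flag is set).
evaluator.calc_cls_values()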