本文整理汇总了Python中evaluator.evaluate方法的典型用法代码示例。如果您正苦于以下问题:Python evaluator.evaluate方法的具体用法?Python evaluator.evaluate怎么用?Python evaluator.evaluate使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类evaluator
的用法示例。
在下文中一共展示了evaluator.evaluate方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: print_evaluation
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def print_evaluation(eval_type):
    """Evaluate the module-level prediction `dataset` against `gold_dataset`
    for the given evaluation type, print micro/macro results, and write the
    analysis/HTML report files into 'tmp'."""
    # Deep-copy so the shared module-level datasets are never mutated.
    gold_copy = copy.deepcopy(gold_dataset)
    pred_copy = copy.deepcopy(dataset)
    if eval_type == "uri":
        # URI evaluation requires both datasets to be filtered first; the
        # filtered gold set also yields the property list to score against.
        gold_copy, goldProperties = evaluator.filterForURIEvaluation(gold_copy)
        pred_copy, _ = evaluator.filterForURIEvaluation(pred_copy)
    else:
        goldProperties = properties
    confusionMatrix, analysis = evaluator.evaluate(
        gold_copy, pred_copy, eval_type, goldProperties)
    # Print results
    print("RESULTS FOR", eval_type)
    evals = evaluator.microEvaluation(confusionMatrix, True)
    evals.extend(evaluator.macroEvaluation(confusionMatrix))
    evaluator.writeAnalysisFile(analysis, 'tmp', eval_type)
    evaluator.writeHtmlFile(analysis, 'tmp', eval_type, goldProperties)
示例2: lamb
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def lamb(args, env):
    """Build a Python closure implementing a (lambda ...) form.

    All but the last element of *args* are symbol nodes naming the
    parameters; the last element is the body expression, evaluated in a
    fresh Environment each time the closure is called.
    """
    if len(args) < 2:
        throw_error("syntax", "Incorrect use of (lambda ...): must take at least two arguments (at least one variable and a body).")
    *param_nodes, body = args
    # Every parameter must be a symbol node before we extract its name.
    for node in param_nodes:
        assert_or_throw(node['type'] == 'symbol', "syntax", "Incorrect use of (lambda ...): the anonymous function's variables must be symbols.")
    param_names = tuple(node['value'] for node in param_nodes)

    def anonymous(*call_values):
        # Strict arity: exactly one value per declared parameter.
        if len(call_values) != len(param_names):
            throw_error("syntax", "This function takes " + str(len(param_names)) + " arguments (" + str(len(call_values)) + " provided).")
        # New scope chained to the defining environment (lexical scoping).
        call_env = Environment(name="anon_fn", outer=env,
                               variables=param_names, values=call_values)
        return ev.evaluate(body, call_env)

    return anonymous
示例3: run_stats
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def run_stats(args):
    """Score each donor's per-method VCF calls against the truth exomes and
    print the per-method averages.

    Expects ``args.out_dir`` to contain one sub-directory per donor, each
    holding ``<method>.vcf`` files. Relies on the module-level
    ``fake_metadata``, ``SNP_METHOD`` and ``INDEL_METHOD`` tables.
    """
    import evaluator
    # Invert fake_metadata: participant_id -> metadata key for the donor.
    rev_map = {}
    for key, meta in fake_metadata.items():
        rev_map[meta['participant_id']] = key
    basedir = os.path.dirname(os.path.dirname(__file__))
    exome_dir = os.path.join(basedir, "testexomes")
    out_scores = {}
    for donor_dir in glob(os.path.join(args.out_dir, "*")):
        donor = os.path.basename(donor_dir)
        if rev_map[donor] not in out_scores:
            out_scores[rev_map[donor]] = {}
        for vcf_file in glob(os.path.join(donor_dir, "*.vcf")):
            method = os.path.basename(vcf_file).replace(".vcf", "")
            # The variant type selects which truth records to compare.
            vtype = None
            if method in SNP_METHOD:
                vtype = "SNV"
            if method in INDEL_METHOD:
                vtype = "INDEL"
            # Truth file is keyed by the last character of the metadata key.
            truth_file = os.path.join(
                exome_dir, "testexome" + rev_map[donor][-1:] + ".truth.vcf.gz")
            scores = evaluator.evaluate(vcf_file, truth_file,
                                        vtype=vtype, truthmask=False)
            out_scores[rev_map[donor]][method] = scores
    # BUG FIX: `print x` is Python 2 statement syntax and a SyntaxError
    # under Python 3; the rest of this file uses the print() function.
    print(out_scores)
    # Aggregate the per-donor score tuples by method.
    totals = {}
    for donor_scores in out_scores.values():
        for method, values in donor_scores.items():
            if method not in totals:
                totals[method] = []
            totals[method].append(values)
    for method, values in totals.items():
        out = []
        # Average the first three score components across donors.
        for i in range(3):
            out.append("%s" % (sum(j[i] for j in values) / float(len(values))))
        print(method, "\t".join(out))
示例4: do
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def do(args, env):
    """Evaluate every expression in *args* inside a fresh "do" scope and
    return the value of the last one."""
    # The child scope is shared by all expressions so earlier (define ...)
    # forms are visible to later ones.
    do_scope = Environment(name="do", outer=env)
    if not args:
        throw_error("syntax", "Incorrect use of (do ...): must take at least one argument.")
    outcome = None
    for expression in args:
        outcome = ev.evaluate(expression, do_scope)
    return outcome
示例5: define
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def define(args, env):
    """Bind a symbol to the evaluated second argument in *env*; returns the
    bound value."""
    if len(args) != 2:
        throw_error("syntax", "Incorrect use of (define ...): must take exactly two arguments.")
    symbol_node, value_expr = args
    assert_or_throw(symbol_node['type'] == 'symbol', "type", "Incorrect use of (define ...): the variable must be a symbol.")
    # Evaluate the value expression in the current scope, then bind it.
    bound = ev.evaluate(value_expr, env)
    env.set(symbol_node['value'], bound)
    return bound
示例6: cond
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def cond(args, env):
    """Evaluate an (if test pass-case fail-case) form: run the test, then
    exactly one of the two branches."""
    if len(args) != 3:
        throw_error("syntax", "Incorrect use of (if ...): must take exactly three arguments (a test, a pass case, and a fail case).")
    verdict = ev.evaluate(args[0], env)
    # Only a genuine boolean is accepted (bool cannot be subclassed, so
    # this is equivalent to the stricter `type(x) != bool` check).
    if not isinstance(verdict, bool):
        throw_error("type", "Incorrect use of (if ...): the test must evaluate to a boolean.")
    branch = args[1] if verdict else args[2]
    return ev.evaluate(branch, env)
示例7: main
# 需要导入模块: import evaluator [as 别名]
# 或者: from evaluator import evaluate [as 别名]
def main(unused_argv):
    """Run object-detection evaluation over checkpoints in FLAGS.checkpoint_dir.

    Optionally pins Intel OpenMP threading knobs via environment variables
    (only when the user has not set them already), loads the pipeline/eval
    configs, builds the model and input pipeline, and hands everything to
    evaluator.evaluate().
    """
    # NOTE(review): the original indentation was lost in transit; the OMP
    # knob is gated on FLAGS.omp while the KMP knobs are set whenever
    # absent, matching the upstream Intel-optimized eval script — confirm.
    if FLAGS.omp > 0:
        if not os.environ.get("OMP_NUM_THREADS"):
            logging.info('OMP_NUM_THREADS value= %d', FLAGS.omp)
            os.environ["OMP_NUM_THREADS"] = str(FLAGS.omp)
    if not os.environ.get("KMP_BLOCKTIME"):
        logging.info('KMP_BLOCKTIME value= %d', FLAGS.blocktime)
        os.environ["KMP_BLOCKTIME"] = str(FLAGS.blocktime)
    if not os.environ.get("KMP_SETTINGS"):
        os.environ["KMP_SETTINGS"] = "1"
    # os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"
    assert FLAGS.checkpoint_dir, '`checkpoint_dir` is missing.'
    assert FLAGS.eval_dir, '`eval_dir` is missing.'
    tf.io.gfile.makedirs(FLAGS.eval_dir)
    if FLAGS.pipeline_config_path:
        # Single merged pipeline config; copy it next to the eval outputs.
        configs = config_util.get_configs_from_pipeline_file(
            FLAGS.pipeline_config_path)
        tf.io.gfile.copy(FLAGS.pipeline_config_path,
                         os.path.join(FLAGS.eval_dir, 'pipeline.config'),
                         overwrite=True)
    else:
        # Separate model/eval/input configs; copy each for reproducibility.
        configs = config_util.get_configs_from_multiple_files(
            model_config_path=FLAGS.model_config_path,
            eval_config_path=FLAGS.eval_config_path,
            eval_input_config_path=FLAGS.input_config_path)
        for name, config in [('model.config', FLAGS.model_config_path),
                             ('eval.config', FLAGS.eval_config_path),
                             ('input.config', FLAGS.input_config_path)]:
            tf.io.gfile.copy(config,
                             os.path.join(FLAGS.eval_dir, name),
                             overwrite=True)
    model_config = configs['model']
    eval_config = configs['eval_config']
    input_config = configs['eval_input_config']
    if FLAGS.eval_training_data:
        input_config = configs['train_input_config']
    model_fn = functools.partial(
        model_builder.build,
        model_config=model_config,
        is_training=False)

    def get_next(config):
        # BUG FIX: tf.compat.v1.data.make_initializable_iterator(dataset,
        # shared_name=None) takes the dataset as its first argument; the
        # previous code passed the `dataset_util` module first, which would
        # raise a TypeError at runtime.
        return tf.compat.v1.data.make_initializable_iterator(
            dataset_builder.build(config)).get_next()

    create_input_dict_fn = functools.partial(get_next, input_config)
    label_map = label_map_util.load_labelmap(input_config.label_map_path)
    max_num_classes = max(item.id for item in label_map.item)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes)
    if FLAGS.run_once:
        # A single evaluation pass instead of polling for new checkpoints.
        eval_config.max_evals = 1
    evaluator.evaluate(create_input_dict_fn, model_fn, eval_config, categories,
                       FLAGS.checkpoint_dir, FLAGS.eval_dir,
                       intra_op=FLAGS.intra_op, inter_op=FLAGS.inter_op)