本文整理匯總了Python中eval.QGEvalCap方法的典型用法代碼示例。如果您正苦於以下問題:Python eval.QGEvalCap方法的具體用法?Python eval.QGEvalCap怎麽用?Python eval.QGEvalCap使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類eval
的用法示例。
在下文中一共展示了eval.QGEvalCap方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: eval
# 需要導入模塊: import eval [as 別名]
# 或者: from eval import QGEvalCap [as 別名]
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Compute QG metric scores for a prediction file.

    out_file: model predictions, one per line (post-processed via fix_tokenization).
    src_file: source sentences, one per line.
    tgt_file: gold questions, one per line, aligned with src_file.
    isDin: boolean value to check whether input file is DirectIn.txt
    Returns whatever QGEvalCap.evaluate() produces.
    """
    # One record per source sentence: drop trailing newline, normalise case.
    with open(src_file, 'r') as handle:
        pairs = [{'tokenized_sentence': raw[:-1].strip().lower()} for raw in handle]
    # Attach the aligned gold question to each record by line position.
    with open(tgt_file, "r") as handle:
        for pos, raw in enumerate(handle):
            pairs[pos]['tokenized_question'] = raw[:-1].strip()
    # Load predictions, de-tokenised and lower-cased.
    with open(out_file, 'r') as handle:
        predictions = [fix_tokenization(raw[:-1].strip()).lower() for raw in handle]
    for pos, record in enumerate(pairs):
        record['prediction'] = predictions[pos]
    # Metric evaluation. NOTE(review): FLOAT_REPR only affects the pure-Python
    # json encoder — presumably a legacy formatting tweak; confirm it matters.
    from eval import QGEvalCap
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    res = defaultdict(list)
    gts = defaultdict(list)
    for record in pairs:
        sent = record['tokenized_sentence']
        # A single prediction per source key; references may accumulate.
        res[sent] = [record['prediction'].encode('utf-8')]
        gts[sent].append(record['tokenized_question'].encode('utf-8'))
    return QGEvalCap(gts, res).evaluate()
示例2: eval
# 需要導入模塊: import eval [as 別名]
# 或者: from eval import QGEvalCap [as 別名]
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
    """
    Compute QG metric scores for a prediction file.

    out_file: model predictions, one per line, lower-cased as-is.
    src_file: source sentences, one per line.
    tgt_file: gold questions, one per line, detokenized before comparison.
    isDin: boolean value to check whether input file is DirectIn.txt
    Returns whatever QGEvalCap.evaluate() produces.
    """
    # Build one record per source sentence (newline stripped, lower-cased).
    with open(src_file, 'r') as handle:
        pairs = [{'tokenized_sentence': raw[:-1].strip().lower()} for raw in handle]
    # Gold questions are detokenized token-by-token, rejoined, lower-cased.
    with open(tgt_file, "r") as handle:
        for pos, raw in enumerate(handle):
            tokens = raw[:-1].strip().split()
            pairs[pos]['tokenized_question'] = " ".join(detokenize(tokens)).lower()
    # Predictions: drop trailing newline, lower-case only.
    with open(out_file, 'r') as handle:
        predictions = [raw[:-1].strip().lower() for raw in handle]
    for pos, record in enumerate(pairs):
        record['prediction'] = predictions[pos]
    # Metric evaluation. NOTE(review): FLOAT_REPR only affects the pure-Python
    # json encoder — presumably a legacy formatting tweak; confirm it matters.
    from eval import QGEvalCap
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    res = defaultdict(list)
    gts = defaultdict(list)
    for record in pairs:
        sent = record['tokenized_sentence']
        # A single prediction per source key; references may accumulate.
        res[sent] = [record['prediction'].encode('utf-8')]
        gts[sent].append(record['tokenized_question'].encode('utf-8'))
    return QGEvalCap(gts, res).evaluate()
示例3: eval
# 需要導入模塊: import eval [as 別名]
# 或者: from eval import QGEvalCap [as 別名]
def eval(out_file, src_file, tgt_file, isDIn = False, num_pairs = 500):
    """
    Score the predictions in out_file against the gold questions in
    tgt_file, keyed by the raw source sentences in src_file.

    isDin: boolean value to check whether input file is DirectIn.txt
    Returns whatever QGEvalCap.evaluate() produces.
    """
    # One record per source line; only the trailing newline is removed.
    pairs = []
    with open(src_file, 'r') as fh:
        for raw in fh:
            pairs.append({'tokenized_sentence': raw[:-1]})
    # Gold questions attach positionally to the source records.
    with open(tgt_file, "r") as fh:
        for pos, raw in enumerate(fh):
            pairs[pos]['tokenized_question'] = raw[:-1]
    # Predictions align one-to-one with the records, by line number.
    with open(out_file, 'r') as fh:
        outputs = [raw[:-1] for raw in fh]
    for pos, item in enumerate(pairs):
        item['prediction'] = outputs[pos]
    ## metric evaluation — FLOAT_REPR tweaks float printing in the
    ## pure-Python json encoder only.
    from eval import QGEvalCap
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    res = defaultdict(list)
    gts = defaultdict(list)
    for item in pairs:
        key = item['tokenized_sentence']
        res[key] = [item['prediction'].encode('utf-8')]
        ## references for the same source key accumulate
        gts[key].append(item['tokenized_question'].encode('utf-8'))
    return QGEvalCap(gts, res).evaluate()
示例4: eval
# 需要導入模塊: import eval [as 別名]
# 或者: from eval import QGEvalCap [as 別名]
def eval(out_file, src_file, tgt_file, isDIn = False, num_pairs = 500):
    """
    Given prediction, source and target files (one entry per line, aligned
    by line number), compute the QG metric scores via QGEvalCap.

    isDin: boolean value to check whether input file is DirectIn.txt
    """
    # Collect source sentences; strip only the trailing newline character.
    pairs = []
    with open(src_file, 'r') as src_fh:
        for line_text in src_fh:
            pairs.append({'tokenized_sentence': line_text[:-1]})
    # Pair each gold question with the source record at the same position.
    with open(tgt_file, "r") as tgt_fh:
        counter = 0
        for line_text in tgt_fh:
            pairs[counter]['tokenized_question'] = line_text[:-1]
            counter += 1
    # Read predictions and attach them positionally as well.
    with open(out_file, 'r') as out_fh:
        predicted = [line_text[:-1] for line_text in out_fh]
    for position, entry in enumerate(pairs):
        entry['prediction'] = predicted[position]
    ## metric evaluation — FLOAT_REPR tweaks float printing in the
    ## pure-Python json encoder only.
    from eval import QGEvalCap
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    res, gts = defaultdict(list), defaultdict(list)
    for entry in pairs:
        source_key = entry['tokenized_sentence']
        res[source_key] = [entry['prediction'].encode('utf-8')]
        ## multiple references may share one source key
        gts[source_key].append(entry['tokenized_question'].encode('utf-8'))
    return QGEvalCap(gts, res).evaluate()
示例5: eval
# 需要導入模塊: import eval [as 別名]
# 或者: from eval import QGEvalCap [as 別名]
def eval(out_file, src_file, tgt_file, isDIn = False, num_pairs = 500):
    """
    Given a filename, calculate the metric scores for that prediction file.

    out_file: model predictions, one per line, aligned with src_file.
    src_file: source sentences, one per line.
    tgt_file: gold questions, one per line, aligned with src_file.
    isDin: boolean value to check whether input file is DirectIn.txt
    Returns whatever QGEvalCap.evaluate() produces.
    """
    # One record per source line; only the trailing newline is removed.
    pairs = []
    with open(src_file, 'r') as infile:
        for line in infile:
            pair = {}
            pair['tokenized_sentence'] = line[:-1]
            pairs.append(pair)
    # Gold questions attach positionally to the source records.
    with open(tgt_file, "r") as infile:
        cnt = 0
        for line in infile:
            pairs[cnt]['tokenized_question'] = line[:-1]
            cnt += 1
    # Predictions align one-to-one with the records, by line number.
    output = []
    with open(out_file, 'r') as infile:
        for line in infile:
            line = line[:-1]
            output.append(line)
    for idx, pair in enumerate(pairs):
        pair['prediction'] = output[idx]
    ## eval — FLOAT_REPR tweaks float printing in the pure-Python json
    ## encoder only. (Removed two leftover set_trace() debugger breakpoints
    ## that halted every call; set_trace was not even imported.)
    from eval import QGEvalCap
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')
    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for pair in pairs[:]:
        key = pair['tokenized_sentence']
        res[key] = [pair['prediction'].encode('utf-8')]
        ## gts: multiple references may share one source key
        gts[key].append(pair['tokenized_question'].encode('utf-8'))
    QGEval = QGEvalCap(gts, res)
    return QGEval.evaluate()