本文整理匯總了Python中opts.train_opts方法的典型用法代碼示例。如果您正苦於以下問題:Python opts.train_opts方法的具體用法?Python opts.train_opts怎麽用?Python opts.train_opts使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類opts
的用法示例。
在下文中一共展示了opts.train_opts方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: main
# 需要導入模塊: import opts [as 別名]
# 或者: from opts import train_opts [as 別名]
def main(anno_file_name, col_headers, raw_args=None):
    """Translate one annotated example and execute the predicted SQL query.

    Args:
        anno_file_name: path to the annotation JSON file to translate.
        col_headers: column headers used to render the final SQL string.
        raw_args: optional argv list forwarded to argparse (None -> sys.argv).

    Returns:
        Tuple of (rendered SQL query string, execution result), where the
        result is None if executing the predicted query failed. Returns
        None implicitly if no model matches ``opt.model_path``.
    """
    parser = argparse.ArgumentParser(description='evaluate.py')
    opts.translate_opts(parser)
    opt = parser.parse_args(raw_args)
    torch.cuda.set_device(opt.gpu)
    opt.db_file = os.path.join(opt.data_path, '{}.db'.format(opt.split))
    opt.pre_word_vecs = os.path.join(opt.data_path, 'embedding')

    # A throwaway parser whose only job is to supply default model/train
    # hyper-parameters to the Translator constructor.
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    opts.train_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    opt.anno = anno_file_name
    engine = DBEngine(opt.db_file)
    js_list = table.IO.read_anno_json(opt.anno)

    for fn_model in glob.glob(opt.model_path):
        opt.model = fn_model
        translator = Translator(opt, dummy_opt.__dict__)
        data = table.IO.TableDataset(js_list, translator.fields, None, False)
        test_data = table.IO.OrderedIterator(
            dataset=data, device=opt.gpu, batch_size=opt.batch_size,
            train=False, sort=True, sort_within_batch=False)
        # inference
        r_list = []
        for batch in test_data:
            r_list += translator.translate(batch)
        r_list.sort(key=lambda x: x.idx)
        # Only the last prediction is used, paired with the last annotation.
        pred = r_list[-1]
        sql_pred = {'agg': pred.agg, 'sel': pred.sel,
                    'conds': pred.recover_cond_to_gloss(js_list[-1])}
        sql_query = Query(sql_pred['sel'], sql_pred['agg'], sql_pred['conds'])
        try:
            ans_pred = engine.execute_query(
                js_list[-1]['table_id'], Query.from_dict(sql_pred), lower=True)
        except Exception:
            # Best-effort execution: a malformed prediction yields no answer.
            ans_pred = None
        # NOTE: returns after the FIRST checkpoint matched by opt.model_path.
        return sql_query.get_complete_query(col_headers), ans_pred
示例2: main
# 需要導入模塊: import opts [as 別名]
# 或者: from opts import train_opts [as 別名]
def main():
    """Evaluate every checkpoint matching ``opt.model_path`` on the annotated
    data, print 'all' and 'exe' accuracies per checkpoint, and record the
    best checkpoint name (by the 'all' metric) when scoring the dev split.

    NOTE(review): relies on a module-level ``opt`` defined elsewhere in
    this file — confirm it is populated by the translate option parser.
    """
    # Throwaway parser: only supplies default model/train hyper-parameters.
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    opts.train_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    engine = DBEngine(opt.db_file)
    with codecs.open(opt.source_file, "r", "utf-8") as corpus_file:
        sql_list = [json.loads(line)['sql'] for line in corpus_file]
    js_list = table.IO.read_anno_json(opt.anno)

    prev_best = (None, None)
    for fn_model in glob.glob(opt.model_path):
        opt.model = fn_model
        translator = Translator(opt, dummy_opt.__dict__)
        dataset = table.IO.TableDataset(js_list, translator.fields, None, False)
        batch_iter = table.IO.OrderedIterator(
            dataset=dataset, device=opt.gpu, batch_size=opt.batch_size,
            train=False, sort=True, sort_within_batch=False)

        # inference
        if opt.beam_search:
            print('Using execution guidance for inference.')
        r_list = []
        for batch in batch_iter:
            r_list.extend(translator.translate(batch, js_list, sql_list))
        r_list.sort(key=lambda item: item.idx)
        assert len(r_list) == len(js_list), 'len(r_list) != len(js_list): {} != {}'.format(
            len(r_list), len(js_list))

        # evaluation: score each prediction against its gold annotation
        for pred, gold, sql_gold in zip(r_list, js_list, sql_list):
            pred.eval(gold, sql_gold, engine)

        print('Results:')
        for metric_name in ('all', 'exe'):
            c_correct = sum(x.correct[metric_name] for x in r_list)
            print('{}: {} / {} = {:.2%}'.format(metric_name, c_correct,
                                                len(r_list), c_correct / len(r_list)))
            # Track the checkpoint with the best exact-match ('all') count.
            if metric_name == 'all' and (prev_best[0] is None or c_correct > prev_best[1]):
                prev_best = (fn_model, c_correct)

    if (opt.split == 'dev') and (prev_best[0] is not None):
        with codecs.open(os.path.join(opt.data_path, 'dev_best.txt'), 'w', encoding='utf-8') as f_out:
            f_out.write('{}\n'.format(prev_best[0]))
示例3: main
# 需要導入模塊: import opts [as 別名]
# 或者: from opts import train_opts [as 別名]
def main():
    """Score every checkpoint matching ``opt.model_path`` on the annotated
    data using the 'tgt' metric, print per-checkpoint accuracy, and write
    the best checkpoint name to dev_best.txt when scoring the dev split.

    NOTE(review): relies on a module-level ``opt`` defined elsewhere in
    this file — confirm it is populated by the translate option parser.
    """
    # Throwaway parser: only supplies default model/train hyper-parameters.
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    opts.train_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    js_list = table.IO.read_anno_json(opt.anno, opt)
    metric_name_list = ['tgt']

    prev_best = (None, None)
    for fn_model in glob.glob(opt.model_path):
        opt.model = fn_model
        print(fn_model)
        print(opt.anno)
        translator = table.Translator(opt, dummy_opt.__dict__)
        dataset = table.IO.TableDataset(
            js_list, translator.fields, 0, None, False)
        batch_iter = table.IO.OrderedIterator(
            dataset=dataset, device=opt.gpu, batch_size=opt.batch_size,
            train=False, sort=True, sort_within_batch=False)

        # inference
        r_list = []
        for batch in batch_iter:
            r_list.extend(translator.translate(batch))
        r_list.sort(key=lambda item: item.idx)
        assert len(r_list) == len(js_list), 'len(r_list) != len(js_list): {} != {}'.format(
            len(r_list), len(js_list))

        # evaluation: score each prediction against its gold annotation
        for pred, gold in zip(r_list, js_list):
            pred.eval(gold)

        print('Results:')
        for metric_name in metric_name_list:
            c_correct = sum(x.correct[metric_name] for x in r_list)
            acc = c_correct / len(r_list)
            print('{}: {} / {} = {:.2%}'.format(metric_name,
                                                c_correct, len(r_list), acc))
            # Track the checkpoint with the best 'tgt' accuracy.
            if metric_name == 'tgt' and (prev_best[0] is None or acc > prev_best[1]):
                prev_best = (fn_model, acc)

    if (opt.split == 'dev') and (prev_best[0] is not None):
        with codecs.open(os.path.join(opt.root_dir, opt.dataset, 'dev_best.txt'), 'w', encoding='utf-8') as f_out:
            f_out.write('{}\n'.format(prev_best[0]))
示例4: main
# 需要導入模塊: import opts [as 別名]
# 或者: from opts import train_opts [as 別名]
def main():
    """Evaluate every checkpoint matching ``opt.model_path`` against the
    annotated data with execution ('exe') and exact-match ('all') metrics,
    and record the best checkpoint name when scoring the dev split.

    NOTE(review): relies on a module-level ``opt`` defined elsewhere in
    this file — confirm it is populated by the translate option parser.
    """
    # Throwaway parser: only supplies default model/train hyper-parameters.
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    opts.train_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    engine = DBEngine(opt.db_file)
    with codecs.open(opt.source_file, "r", "utf-8") as corpus_file:
        sql_list = [json.loads(line)['sql'] for line in corpus_file]
    js_list = table.IO.read_anno_json(opt.anno)

    prev_best = (None, None)
    for fn_model in glob.glob(opt.model_path):
        print(fn_model)
        print(opt.anno)
        opt.model = fn_model
        translator = table.Translator(opt, dummy_opt.__dict__)
        dataset = table.IO.TableDataset(js_list, translator.fields, None, False)
        batch_iter = table.IO.OrderedIterator(
            dataset=dataset, device=opt.gpu, batch_size=opt.batch_size,
            train=False, sort=True, sort_within_batch=False)

        # inference
        r_list = []
        for batch in batch_iter:
            r_list.extend(translator.translate(batch))
        r_list.sort(key=lambda item: item.idx)
        assert len(r_list) == len(js_list), 'len(r_list) != len(js_list): {} != {}'.format(
            len(r_list), len(js_list))

        # evaluation: score each prediction against its gold annotation
        for pred, gold, sql_gold in zip(r_list, js_list, sql_list):
            pred.eval(gold, sql_gold, engine)

        print('Results:')
        for metric_name in ('all', 'exe'):
            c_correct = sum(x.correct[metric_name] for x in r_list)
            print('{}: {} / {} = {:.2%}'.format(metric_name, c_correct,
                                                len(r_list), c_correct / len(r_list)))
            # Track the checkpoint with the best exact-match ('all') count.
            if metric_name == 'all' and (prev_best[0] is None or c_correct > prev_best[1]):
                prev_best = (fn_model, c_correct)

    if (opt.split == 'dev') and (prev_best[0] is not None):
        with codecs.open(os.path.join(opt.data_path, 'dev_best.txt'), 'w', encoding='utf-8') as f_out:
            f_out.write('{}\n'.format(prev_best[0]))
示例5: main
# 需要導入模塊: import opts [as 別名]
# 或者: from opts import train_opts [as 別名]
def main():
    """Score every checkpoint matching ``opt.model_path`` on the annotated
    data with the 'tgt' metric, using a pre-trained BPE processor loaded
    from ``opt.bpe_path``, and record the best checkpoint on the dev split.

    NOTE(review): relies on a module-level ``opt`` defined elsewhere in
    this file — confirm it is populated by the translate option parser.
    """
    # Throwaway parser: only supplies default model/train hyper-parameters.
    dummy_parser = argparse.ArgumentParser(description='train.py')
    opts.model_opts(dummy_parser)
    opts.train_opts(dummy_parser)
    dummy_opt = dummy_parser.parse_known_args([])[0]

    js_list = table.IO.read_anno_json(opt.anno, opt)
    # Load the serialized BPE processor used to tokenize the dataset.
    bpe_processor = torch.load(opt.bpe_path)
    metric_name_list = ['tgt']

    prev_best = (None, None)
    for fn_model in glob.glob(opt.model_path):
        opt.model = fn_model
        print(fn_model)
        print(opt.anno)
        translator = table.Translator(opt, dummy_opt.__dict__)
        dataset = table.IO.TableDataset(
            js_list, translator.fields, bpe_processor, 0, None, False)
        batch_iter = table.IO.OrderedIterator(
            dataset=dataset, device=opt.gpu, batch_size=opt.batch_size,
            train=False, sort=True, sort_within_batch=False)

        # inference
        r_list = []
        for batch in batch_iter:
            r_list.extend(translator.translate(batch))
        r_list.sort(key=lambda item: item.idx)
        assert len(r_list) == len(js_list), 'len(r_list) != len(js_list): {} != {}'.format(
            len(r_list), len(js_list))

        # evaluation: score each prediction against its gold annotation
        for pred, gold in zip(r_list, js_list):
            pred.eval(gold)

        print('Results:')
        for metric_name in metric_name_list:
            c_correct = sum(x.correct[metric_name] for x in r_list)
            acc = c_correct / len(r_list)
            print('{}: {} / {} = {:.2%}'.format(metric_name,
                                                c_correct, len(r_list), acc))
            # Track the checkpoint with the best 'tgt' accuracy.
            if metric_name == 'tgt' and (prev_best[0] is None or acc > prev_best[1]):
                prev_best = (fn_model, acc)

    if (opt.split == 'dev') and (prev_best[0] is not None):
        with codecs.open(os.path.join(opt.root_dir, opt.dataset, 'dev_best.txt'), 'w', encoding='utf-8') as f_out:
            f_out.write('{}\n'.format(prev_best[0]))