This page collects typical usage examples of the preprocess.build_save_dataset method in Python. If you are wondering what exactly preprocess.build_save_dataset does, how to call it, or where to find working examples of it, the curated code samples below may help. You can also browse further usage examples from the preprocess module the method belongs to.
The following presents 5 code examples of the preprocess.build_save_dataset method, sorted by popularity by default.
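Before the examples, here is a minimal sketch of how such a test might be driven end to end. It assumes OpenNMT-py's option parser (onmt.opts.preprocess_opts; its exact location varies by release) and small sample corpora under data/ — both are assumptions, not part of this page's examples — and uses the older three-argument signature shown in Examples 1, 2 and 4.

import argparse

import onmt.inputters
import onmt.opts
import preprocess  # OpenNMT-py's top-level preprocess.py

# Build an opt namespace from OpenNMT-py's standard preprocessing flags.
# The data paths are placeholders; point them at your own corpora.
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.opts.preprocess_opts(parser)
opt = parser.parse_known_args([
    '-train_src', 'data/src-train.txt',
    '-train_tgt', 'data/tgt-train.txt',
    '-valid_src', 'data/src-val.txt',
    '-valid_tgt', 'data/tgt-val.txt',
    '-save_data', 'data/demo',
])[0]

# data_type "text" with no extra source/target features.
fields = onmt.inputters.get_fields("text", 0, 0)

# Older releases take ('train' | 'valid', fields, opt); see Examples 1, 2, 4.
train_data_files = preprocess.build_save_dataset('train', fields, opt)
preprocess.build_save_vocab(train_data_files, fields, opt)
preprocess.build_save_dataset('valid', fields, opt)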
Example 1: dataset_build
# Required module: import preprocess [as alias]
# Or: from preprocess import build_save_dataset [as alias]
def dataset_build(self, opt):
    fields = onmt.inputters.get_fields("text", 0, 0)
    # Write a small dummy vocabulary wherever a vocab path was supplied.
    if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
        with codecs.open(opt.src_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
        with codecs.open(opt.tgt_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    # Build and save the train shards, the vocabulary, then the valid shards.
    train_data_files = preprocess.build_save_dataset('train', fields, opt)
    preprocess.build_save_vocab(train_data_files, fields, opt)
    preprocess.build_save_dataset('valid', fields, opt)
    # Remove the generated *.pt files and the dummy vocabularies.
    for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(pt)
    if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
        os.remove(opt.src_vocab)
    if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
        os.remove(opt.tgt_vocab)
Example 2: dataset_build
# Required module: import preprocess [as alias]
# Or: from preprocess import build_save_dataset [as alias]
def dataset_build(self, opt):
    fields = onmt.io.get_fields("text", 0, 0)
    # Write a small dummy vocabulary wherever a vocab path was supplied.
    if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
        with codecs.open(opt.src_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
        with codecs.open(opt.tgt_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    train_data_files = preprocess.build_save_dataset('train', fields, opt)
    preprocess.build_save_vocab(train_data_files, fields, opt)
    preprocess.build_save_dataset('valid', fields, opt)
    # Remove the generated *.pt files and the dummy vocabularies.
    for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(pt)
    if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
        os.remove(opt.src_vocab)
    if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
        os.remove(opt.tgt_vocab)
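Example 2 is functionally identical to Example 1; it simply targets an older OpenNMT-py release in which the fields helpers lived in onmt.io rather than onmt.inputters.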
Example 3: dataset_build
# Required module: import preprocess [as alias]
# Or: from preprocess import build_save_dataset [as alias]
def dataset_build(self, opt):
    fields = onmt.inputters.get_fields("text", 0, 0)
    # Write a small dummy vocabulary wherever a vocab path was supplied.
    if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
        with codecs.open(opt.src_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
        with codecs.open(opt.tgt_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    # Newer API: build_save_dataset also takes src/tgt reader objects.
    src_reader = onmt.inputters.str2reader[opt.data_type].from_opt(opt)
    tgt_reader = onmt.inputters.str2reader["text"].from_opt(opt)
    preprocess.build_save_dataset(
        'train', fields, src_reader, tgt_reader, opt)
    preprocess.build_save_dataset(
        'valid', fields, src_reader, tgt_reader, opt)
    # Remove the generated *.pt files and the dummy vocabularies.
    for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(pt)
    if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
        os.remove(opt.src_vocab)
    if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
        os.remove(opt.tgt_vocab)
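Example 3 (and Example 5 below) shows a newer OpenNMT-py signature in which build_save_dataset additionally takes source and target reader objects constructed from the options via onmt.inputters.str2reader; Example 5 further builds and saves the vocabulary from the returned train shards.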
Example 4: dataset_build
# Required module: import preprocess [as alias]
# Or: from preprocess import build_save_dataset [as alias]
def dataset_build(self, opt):
    fields = onmt.io.get_fields("text", 0, 0)
    train_data_files = preprocess.build_save_dataset('train', fields, opt)
    preprocess.build_save_vocab(train_data_files, fields, opt)
    preprocess.build_save_dataset('valid', fields, opt)
    # Remove the generated *.pt files.
    for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(pt)
Example 5: dataset_build
# Required module: import preprocess [as alias]
# Or: from preprocess import build_save_dataset [as alias]
def dataset_build(self, opt):
    fields = onmt.inputters.get_fields("text", 0, 0)
    # Write a small dummy vocabulary wherever a vocab path was supplied.
    if hasattr(opt, 'src_vocab') and len(opt.src_vocab) > 0:
        with codecs.open(opt.src_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    if hasattr(opt, 'tgt_vocab') and len(opt.tgt_vocab) > 0:
        with codecs.open(opt.tgt_vocab, 'w', 'utf-8') as f:
            f.write('a\nb\nc\nd\ne\nf\n')
    src_reader = onmt.inputters.str2reader[opt.data_type].from_opt(opt)
    tgt_reader = onmt.inputters.str2reader["text"].from_opt(opt)
    train_data_files = preprocess.build_save_dataset(
        'train', fields, src_reader, tgt_reader, opt)
    preprocess.build_save_vocab(train_data_files, fields, opt)
    preprocess.build_save_dataset(
        'valid', fields, src_reader, tgt_reader, opt)
    # Remove the generated *.pt files and the dummy vocabularies.
    for pt in glob.glob(SAVE_DATA_PREFIX + '*.pt'):
        os.remove(pt)
    if hasattr(opt, 'src_vocab') and os.path.exists(opt.src_vocab):
        os.remove(opt.src_vocab)
    if hasattr(opt, 'tgt_vocab') and os.path.exists(opt.tgt_vocab):
        os.remove(opt.tgt_vocab)
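If you want to inspect what build_save_dataset wrote before the cleanup loop deletes it, OpenNMT-py serializes these shards with torch.save, so they can be read back with torch.load. A minimal sketch, assuming the same SAVE_DATA_PREFIX used in the examples above:

import glob

import torch

# Each generated *.pt file is either a dataset shard (a torchtext Dataset
# exposing an .examples list) or the saved vocabulary; print a short summary.
for pt in sorted(glob.glob(SAVE_DATA_PREFIX + '*.pt')):
    obj = torch.load(pt)
    n_examples = len(obj.examples) if hasattr(obj, 'examples') else 'n/a'
    print(pt, type(obj).__name__, n_examples)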