This article collects typical usage examples of the options.parse_args_and_arch method from the Python fairseq.options module. If you are unsure what options.parse_args_and_arch does or how to use it, the selected code examples below may help; you can also explore the other members of the fairseq.options module.
The following shows 15 code examples of options.parse_args_and_arch, sorted by popularity by default.
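All of the examples share the same basic pattern: build an argument parser with one of the options.get_*_parser() helpers, then hand it to options.parse_args_and_arch, which parses the command line (or an explicit argument list) and fills in the defaults of the architecture selected by --arch. The minimal sketch below illustrates that pattern; the helper name parse_training_args, the data directory argument, and the specific flag values are illustrative placeholders rather than part of the fairseq API.
from fairseq import options


def parse_training_args(data_dir, arch):
    # Hypothetical helper: builds the standard training parser and lets
    # parse_args_and_arch resolve both the CLI flags and the defaults of
    # the architecture named by --arch. Flag values are placeholders.
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(
        parser,
        [
            '--task', 'translation',
            data_dir,
            '--arch', arch,
            '--max-tokens', '500',
            '--no-progress-bar',
        ],
    )
    return args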
Example 1: train_translation_model
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def train_translation_model(data_dir, arch, extra_flags=None):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'translation',
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
            '--optimizer', 'nag',
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--source-lang', 'in',
            '--target-lang', 'out',
        ] + (extra_flags or []),
    )
    train.main(train_args)
Example 2: generate_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def generate_main(data_dir):
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam', '3',
            '--batch-size', '64',
            '--max-len-b', '5',
            '--gen-subset', 'valid',
            '--no-progress-bar',
        ],
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    interactive.main(generate_args)
    sys.stdin = orig_stdin
Example 3: train_language_model
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def train_language_model(data_dir, arch):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'language_modeling',
            data_dir,
            '--arch', arch,
            '--optimizer', 'nag',
            '--lr', '1.0',
            '--criterion', 'adaptive_loss',
            '--adaptive-softmax-cutoff', '5,10,15',
            '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
            '--decoder-embed-dim', '280',
            '--max-tokens', '500',
            '--tokens-per-sample', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
        ],
    )
    train.main(train_args)
Example 4: train_masked_lm
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def train_masked_lm(data_dir, arch, extra_flags=None):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'masked_lm',
            data_dir,
            '--arch', arch,
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'masked_lm',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            '--num-workers', 0,
        ] + (extra_flags or []),
    )
    train.main(train_args)
Example 5: train_roberta_head
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'sentence_prediction',
            data_dir,
            '--arch', arch,
            '--num-classes', str(num_classes),
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'sentence_prediction',
            '--max-tokens', '500',
            '--max-positions', '500',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            '--num-workers', 0,
        ] + (extra_flags or []),
    )
    train.main(train_args)
Example 6: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        # fallback for single node with multiple GPUs
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None  # set based on device id
        if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(
            fn=distributed_main,
            args=(args, ),
            nprocs=args.distributed_world_size,
        )
    else:
        # single GPU training
        main(args)
Example 7: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_generation_parser()
    parser = add_asr_eval_argument(parser)
    args = options.parse_args_and_arch(parser)
    main(args)
Example 8: eval_lm_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def eval_lm_main(data_dir):
    eval_lm_parser = options.get_eval_lm_parser()
    eval_lm_args = options.parse_args_and_arch(
        eval_lm_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--no-progress-bar',
        ],
    )
    eval_lm.main(eval_lm_args)
Example 9: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_eval_lm_parser()
    args = options.parse_args_and_arch(parser)
    distributed_utils.call_main(args, main)
Example 10: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
Example 11: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)

    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)

    distributed_utils.call_main(args, main, override_args=override_args)
Example 12: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = options.get_interactive_generation_parser()
    args = options.parse_args_and_arch(parser)
    distributed_utils.call_main(args, main)
Example 13: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = rerank_options.get_reranking_parser()
    args = options.parse_args_and_arch(parser)
    score_bw(args)
Example 14: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = rerank_options.get_tuning_parser()
    args = options.parse_args_and_arch(parser)
    random_search(args)
Example 15: cli_main
# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import parse_args_and_arch [as alias]
def cli_main():
    parser = rerank_options.get_reranking_parser()
    args = options.parse_args_and_arch(parser)
    gen_and_reprocess_nbest(args)