当前位置: 首页>>代码示例>>Python>>正文


Python options.get_training_parser方法代码示例

本文整理汇总了Python中fairseq.options.get_training_parser方法的典型用法代码示例。如果您正苦于以下问题:Python options.get_training_parser方法的具体用法?Python options.get_training_parser怎么用?Python options.get_training_parser使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在fairseq.options的用法示例。


在下文中一共展示了options.get_training_parser方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: train_translation_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_translation_model(data_dir, arch, extra_flags=None):
    """Train a tiny translation model for one epoch on *data_dir*.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        extra_flags: optional list of extra CLI argument strings to append.
    """
    parser = options.get_training_parser()
    # Base argv kept deliberately small so the test run finishes quickly.
    argv = [
        '--task', 'translation',
        data_dir,
        '--save-dir', data_dir,
        '--arch', arch,
        '--optimizer', 'nag',
        '--lr', '0.05',
        '--max-tokens', '500',
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--source-lang', 'in',
        '--target-lang', 'out',
    ]
    if extra_flags:
        argv.extend(extra_flags)
    parsed = options.parse_args_and_arch(parser, argv)
    train.main(parsed)
开发者ID:nusnlp,项目名称:crosentgec,代码行数:22,代码来源:test_binaries.py

示例2: train_language_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_language_model(data_dir, arch):
    """Train a small adaptive-softmax language model for one epoch.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
    """
    parser = options.get_training_parser()
    parsed = options.parse_args_and_arch(
        parser,
        [
            '--task', 'language_modeling',
            data_dir,
            '--arch', arch,
            '--optimizer', 'nag',
            '--lr', '1.0',
            '--criterion', 'adaptive_loss',
            # Small cutoffs / layer spec keep the model tiny for fast testing.
            '--adaptive-softmax-cutoff', '5,10,15',
            '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
            '--decoder-embed-dim', '280',
            '--max-tokens', '500',
            '--tokens-per-sample', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
        ],
    )
    train.main(parsed)
开发者ID:nusnlp,项目名称:crosentgec,代码行数:25,代码来源:test_binaries.py

示例3: train_masked_lm

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_masked_lm(data_dir, arch, extra_flags=None):
    """Train a masked language model for one epoch on *data_dir*.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        extra_flags: optional list of extra CLI argument strings to append.
    """
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'masked_lm',
            data_dir,
            '--arch', arch,
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'masked_lm',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            # argv elements must be strings: argparse only tolerated the bare
            # int 0 by accident (it is falsy); any other int would crash.
            '--num-workers', '0',
        ] + (extra_flags or []),
    )
    train.main(train_args)
开发者ID:pytorch,项目名称:fairseq,代码行数:23,代码来源:test_binaries.py

示例4: train_roberta_head

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
    """Train a RoBERTa sentence-prediction head for one epoch on *data_dir*.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        num_classes: number of output classes for the prediction head.
        extra_flags: optional list of extra CLI argument strings to append.
    """
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'sentence_prediction',
            data_dir,
            '--arch', arch,
            '--num-classes', str(num_classes),
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'sentence_prediction',
            '--max-tokens', '500',
            '--max-positions', '500',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            # argv elements must be strings: argparse only tolerated the bare
            # int 0 by accident (it is falsy); any other int would crash.
            '--num-workers', '0',
        ] + (extra_flags or []),
    )
    train.main(train_args)
开发者ID:pytorch,项目名称:fairseq,代码行数:26,代码来源:test_binaries.py

示例5: cli_main

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def cli_main():
    """Command-line entry point: parse args and dispatch to the right trainer.

    Chooses between explicit distributed training, multi-GPU spawn on a
    single node, and plain single-GPU training, based on the parsed args.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        distributed_main(args.device_id, args)
        return

    if args.distributed_world_size <= 1:
        # single GPU training
        main(args)
        return

    # fallback for single node with multiple GPUs
    port = random.randint(10000, 20000)
    args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
    args.distributed_rank = None  # set based on device id
    if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
        print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
    torch.multiprocessing.spawn(
        fn=distributed_main,
        args=(args, ),
        nprocs=args.distributed_world_size,
    )
开发者ID:kakaobrain,项目名称:helo_word,代码行数:27,代码来源:train.py

示例6: train_language_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_language_model(data_dir, arch):
    """Train a small adaptive-softmax language model for one epoch.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
    """
    parser = options.get_training_parser()
    argv = [
        '--task', 'language_modeling',
        data_dir,
        '--arch', arch,
        '--optimizer', 'nag',
        '--lr', '0.1',
        '--criterion', 'adaptive_loss',
        # Small cutoffs / layer spec keep the model tiny for fast testing.
        '--adaptive-softmax-cutoff', '5,10,15',
        '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
        '--decoder-embed-dim', '280',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--ddp-backend', 'no_c10d',
    ]
    parsed = options.parse_args_and_arch(parser, argv)
    train.main(parsed)
开发者ID:kakaobrain,项目名称:helo_word,代码行数:26,代码来源:test_binaries.py

示例7: train_translation_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False,
                            lang_flags=None, extra_valid_flags=None):
    """Train a tiny translation model for one epoch, optionally validating.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        extra_flags: optional extra CLI argument strings for training.
        task: fairseq task name (defaults to ``'translation'``).
        run_validation: when True, validate ``checkpoint_last.pt`` afterwards.
        lang_flags: optional source/target language flags; defaults to in/out.
        extra_valid_flags: optional extra CLI argument strings for validation.
    """
    if lang_flags is None:
        lang_flags = [
            '--source-lang', 'in',
            '--target-lang', 'out',
        ]
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', task,
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            # argv elements must be strings: argparse only tolerated the bare
            # int 0 by accident (it is falsy); any other int would crash.
            '--num-workers', '0',
        ] + lang_flags + (extra_flags or []),
    )
    train.main(train_args)

    if run_validation:
        # test validation
        validate_parser = options.get_validation_parser()
        validate_args = options.parse_args_and_arch(
            validate_parser,
            [
                '--task', task,
                data_dir,
                '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
                '--valid-subset', 'valid',
                '--max-tokens', '500',
                '--no-progress-bar',
            ] + lang_flags + (extra_valid_flags or [])
        )
        validate.main(validate_args)
开发者ID:pytorch,项目名称:fairseq,代码行数:42,代码来源:utils.py

示例8: train_language_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    """Train a small adaptive-softmax language model, optionally validating.

    Args:
        data_dir: directory holding the binarized data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        extra_flags: optional extra CLI argument strings for training.
        run_validation: when True, validate ``checkpoint_last.pt`` afterwards.
    """
    parser = options.get_training_parser()
    argv = [
        '--task', 'language_modeling',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr', '0.0001',
        '--criterion', 'adaptive_loss',
        '--adaptive-softmax-cutoff', '5,10,15',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--ddp-backend', 'no_c10d',
    ]
    if extra_flags:
        argv.extend(extra_flags)
    train.main(options.parse_args_and_arch(parser, argv))

    if not run_validation:
        return

    # test validation
    valid_parser = options.get_validation_parser()
    valid_args = options.parse_args_and_arch(
        valid_parser,
        [
            '--task', 'language_modeling',
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--valid-subset', 'valid',
            '--max-tokens', '500',
            '--no-progress-bar',
        ]
    )
    validate.main(valid_args)
开发者ID:pytorch,项目名称:fairseq,代码行数:40,代码来源:test_binaries.py

示例9: train_legacy_masked_language_model

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
    """Train a legacy cross-lingual masked LM for one epoch on *data_dir*.

    Args:
        data_dir: directory holding the raw data; also used as save dir.
        arch: fairseq architecture name passed via ``--arch``.
        extra_args: optional iterable of extra CLI argument strings.
    """
    parser = options.get_training_parser()
    # TODO: langs should be in and out right?
    argv = [
        "--task", "cross_lingual_lm",
        data_dir,
        "--arch", arch,
        # Optimizer args
        "--optimizer", "adam",
        "--lr-scheduler", "reduce_lr_on_plateau",
        "--lr-shrink", "0.5",
        "--lr", "0.0001",
        "--min-lr", "1e-09",
        # dropout, attention args
        "--dropout", "0.1",
        "--attention-dropout", "0.1",
        # MLM args
        "--criterion", "legacy_masked_lm_loss",
        "--masked-lm-only",
        "--monolingual-langs", "in,out",
        "--num-segment", "5",
        # Transformer args: use a small transformer model for fast training
        "--encoder-layers", "1",
        "--encoder-embed-dim", "32",
        "--encoder-attention-heads", "1",
        "--encoder-ffn-embed-dim", "32",
        # Other training args
        "--max-tokens", "500",
        "--tokens-per-sample", "500",
        "--save-dir", data_dir,
        "--max-epoch", "1",
        "--no-progress-bar",
        "--distributed-world-size", "1",
        "--dataset-impl", "raw",
    ]
    parsed = options.parse_args_and_arch(parser, argv + list(extra_args))
    train.main(parsed)
开发者ID:pytorch,项目名称:fairseq,代码行数:63,代码来源:test_binaries.py

示例10: run_fault_tolerant_loop

# 需要导入模块: from fairseq import options [as 别名]
# 或者: from fairseq.options import get_training_parser [as 别名]
def run_fault_tolerant_loop():
    """Entrance function to the fairseq library, providing fault-tolerance."""

    # Parse the command line arguments.
    parser = options.get_training_parser()
    add_ray_args(parser)
    args = options.parse_args_and_arch(parser)
    # Keep a pristine copy so each retry starts from the original arguments,
    # undoing any mutation done by a previous (failed) attempt.
    original_args = copy.deepcopy(args)

    # Main loop for fault-tolerant training.
    retry = True
    while retry:
        args = copy.deepcopy(original_args)

        # Initialize Ray.
        ray.init(address=args.ray_address)

        set_num_resources(args)
        set_batch_size(args)

        # Set up Ray distributed actors.
        # Each actor gets one CPU, plus one GPU unless --cpu was requested.
        Actor = ray.remote(
            num_cpus=1, num_gpus=int(not args.cpu))(RayDistributedActor)
        workers = [Actor.remote() for i in range(args.distributed_world_size)]

        # Get the IP address and a free port of actor 0, which is used for
        # fairseq distributed training.
        ip = ray.get(workers[0].get_node_ip.remote())
        port = ray.get(workers[0].find_free_port.remote())
        address = "tcp://{ip}:{port}".format(ip=ip, port=port)

        # Start the remote processes, and check whether their are any process
        # fails. If so, restart all the processes.
        unfinished = [
            worker.run.remote(address, i, args)
            for i, worker in enumerate(workers)
        ]
        try:
            # Drain the worker futures; ray.get re-raises any worker failure.
            while len(unfinished) > 0:
                finished, unfinished = ray.wait(unfinished)
                finished = ray.get(finished)
            retry = False
        except Exception as inst:
            # Any worker failure triggers a full restart of the whole job.
            print("Ray restart because following error occurs:")
            print(inst)
            retry = True
        ray.shutdown()
开发者ID:ray-project,项目名称:ray,代码行数:49,代码来源:ray_train.py


注:本文中的fairseq.options.get_training_parser方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。