This page collects typical usage examples of the Python method fairseq.options.eval_bool. If you are wondering how options.eval_bool works, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples for the module that contains this method, fairseq.options.
A total of 14 code examples of options.eval_bool are shown below, sorted by popularity by default.
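All of the examples share the same pattern: boolean-like command-line flags such as --left-pad-source arrive from argparse as the strings "True" or "False", and options.eval_bool converts them into real Python booleans before the task or model is constructed. As a minimal, hypothetical sketch (the Namespace values below are made up for illustration and are not part of fairseq itself):

# Minimal usage sketch; argparse.Namespace stands in for parsed fairseq args.
from argparse import Namespace
from fairseq import options

args = Namespace(left_pad_source="True", left_pad_target="False")
args.left_pad_source = options.eval_bool(args.left_pad_source)  # "True"  -> True
args.left_pad_target = options.eval_bool(args.left_pad_target)  # "False" -> False
print(args.left_pad_source, args.left_pad_target)  # True False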
Example 1: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data)
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    src_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict)
Example 2: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)
    args.left_pad_context = options.eval_bool(args.left_pad_context)

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data)
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    ctx_dict = Dictionary.load(os.path.join(args.data, 'dict.ctx.txt'))
    src_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    print('| [{}] dictionary: {} types'.format('ctx', len(ctx_dict)))
    print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict, ctx_dict)
Example 3: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    if args.source_lang is not None or args.target_lang is not None:
        if args.lang_pairs is not None:
            raise ValueError(
                "--source-lang/--target-lang implies generation, which is "
                "incompatible with --lang-pairs"
            )
        training = False
        args.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
    else:
        training = True
        args.lang_pairs = args.lang_pairs.split(",")
        args.source_lang, args.target_lang = args.lang_pairs[0].split("-")

    dicts = tasks_utils.load_multilingual_vocabulary(args)
    return cls(args, dicts, training)
Example 4: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)

    source_dict = pytorch_translate_dictionary.Dictionary.load(
        args.source_vocab_file
    )
    target_dict = pytorch_translate_dictionary.Dictionary.load(
        args.target_vocab_file
    )
    source_lang = args.source_lang or "src"
    target_lang = args.target_lang or "tgt"
    args.append_bos = True

    print(f"| [{source_lang}] dictionary: {len(source_dict)} types")
    print(f"| [{target_lang}] dictionary: {len(target_dict)} types")
    return cls(args, source_dict, target_dict)
Example 5: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)

    # Load dictionaries
    source_dict = MaskedLMDictionary.load(args.source_vocab_file)
    target_dict = MaskedLMDictionary.load(args.target_vocab_file)
    source_lang = args.source_lang or "src"
    target_lang = args.target_lang or "tgt"
    print(f"| [{source_lang}] dictionary: {len(source_dict)} types")
    print(f"| [{target_lang}] dictionary: {len(target_dict)} types")

    use_char_source = (args.char_source_vocab_file != "") or (
        getattr(args, "arch", "") in constants.ARCHS_FOR_CHAR_SOURCE
    )
    if use_char_source:
        char_source_dict = MaskedLMDictionary.load(args.char_source_vocab_file)
        # this attribute is used for CharSourceModel construction
        args.char_source_dict_size = len(char_source_dict)
    else:
        char_source_dict = None

    return cls(args, source_dict, target_dict, char_source_dict)
Example 6: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data)
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    src_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict)
Example 7: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    """Set up the GEC task, including dictionary and model building.

    Similar to the translation task, but also loads label dictionaries.
    """
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data[0])
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    src_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict)
Example 8: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    """Setup the task (e.g., load dictionaries).

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data[0])
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    src_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = cls.load_dictionary(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict)
Example 9: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    """Setup the task (e.g., load dictionaries).

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    paths = utils.split_paths(args.data)
    assert len(paths) > 0

    # find language pair automatically
    if args.source_lang is None or args.target_lang is None:
        args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
    if args.source_lang is None or args.target_lang is None:
        raise Exception('Could not infer language pair, please provide it explicitly')

    # load dictionaries
    src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
    tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
    assert src_dict.pad() == tgt_dict.pad()
    assert src_dict.eos() == tgt_dict.eos()
    assert src_dict.unk() == tgt_dict.unk()
    logger.info('[{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
    logger.info('[{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

    return cls(args, src_dict, tgt_dict)
Example 10: prepare

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def prepare(cls, args, **kargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    if args.lang_pairs is None:
        raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
    if isinstance(args.lang_pairs, str):
        args.lang_pairs = args.lang_pairs.split(',')
    sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
    if args.source_lang is not None or args.target_lang is not None:
        training = False
    else:
        training = True

    # load dictionaries
    dicts = OrderedDict()
    for lang in sorted_langs:
        paths = utils.split_paths(args.data)
        assert len(paths) > 0
        dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
        if len(dicts) > 0:
            assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
            assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
            assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
        if args.encoder_langtok is not None or args.decoder_langtok:
            for lang_to_add in sorted_langs:
                dicts[lang].add_symbol(_lang_token(lang_to_add))
        logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))

    return dicts, training
Example 11: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    assert pytorch_translate_data.is_multilingual(
        args
    ), "Must set `--task pytorch_translate_multilingual` for multilingual training"
    args.left_pad_source = options.eval_bool(args.left_pad_source)

    def load_dicts(langs, paths):
        dicts = OrderedDict()
        for lang, dict_path in zip(langs, paths):
            d = pytorch_translate_dictionary.Dictionary.load(dict_path)
            dicts[lang] = d
            print(f"| [{lang}] dictionary: {len(d)} types")
        return dicts

    if not hasattr(args, "multiling_source_vocab_file"):
        args.multiling_encoder_lang = args.multiling_source_lang
        args.multiling_source_vocab_file = [args.source_vocab_file]
    if not hasattr(args, "multiling_target_vocab_file"):
        args.multiling_decoder_lang = args.multiling_target_lang
        args.multiling_target_vocab_file = [args.target_vocab_file]

    # Load dictionaries
    src_dicts = load_dicts(
        args.multiling_encoder_lang, args.multiling_source_vocab_file
    )
    tgt_dicts = load_dicts(
        args.multiling_decoder_lang, args.multiling_target_vocab_file
    )

    return cls(args, src_dicts, tgt_dicts)
Example 12: setup_task

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def setup_task(cls, args, **kwargs):
    args.left_pad_source = options.eval_bool(args.left_pad_source)
    args.left_pad_target = options.eval_bool(args.left_pad_target)

    if args.source_lang is not None or args.target_lang is not None:
        if args.lang_pairs is not None:
            raise ValueError(
                '--source-lang/--target-lang implies generation, which is '
                'incompatible with --lang-pairs'
            )
        training = False
        args.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
    else:
        training = True
        args.lang_pairs = args.lang_pairs.split(',')
        args.source_lang, args.target_lang = args.lang_pairs[0].split('-')

    langs = list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})

    # load dictionaries
    dicts = OrderedDict()
    for lang in langs:
        dicts[lang] = Dictionary.load(os.path.join(args.data, 'dict.{}.txt'.format(lang)))
        if len(dicts) > 0:
            assert dicts[lang].pad() == dicts[langs[0]].pad()
            assert dicts[lang].eos() == dicts[langs[0]].eos()
            assert dicts[lang].unk() == dicts[langs[0]].unk()
        print('| [{}] dictionary: {} types'.format(lang, len(dicts[lang])))

    return cls(args, dicts, training)
Example 13: build_model

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def build_model(cls, args, task):
    """Build a new model instance."""
    # make sure that all args are properly defaulted (in case there are any new ones)
    base_architecture(args)

    def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        embed_dict = utils.parse_embedding(embed_path)
        utils.print_embed_overlap(embed_dict, dictionary)
        return utils.load_embedding(embed_dict, dictionary, embed_tokens)

    pretrained_encoder_embed = None
    if args.encoder_embed_path:
        pretrained_encoder_embed = load_pretrained_embedding_from_file(
            args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim)
    pretrained_decoder_embed = None
    if args.decoder_embed_path:
        pretrained_decoder_embed = load_pretrained_embedding_from_file(
            args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim)

    encoder = LSTMEncoder(
        dictionary=task.source_dictionary,
        embed_dim=args.encoder_embed_dim,
        hidden_size=args.encoder_hidden_size,
        num_layers=args.encoder_layers,
        dropout_in=args.encoder_dropout_in,
        dropout_out=args.encoder_dropout_out,
        bidirectional=args.encoder_bidirectional,
        pretrained_embed=pretrained_encoder_embed,
    )
    decoder = LSTMDecoder(
        dictionary=task.target_dictionary,
        embed_dim=args.decoder_embed_dim,
        hidden_size=args.decoder_hidden_size,
        out_embed_dim=args.decoder_out_embed_dim,
        num_layers=args.decoder_layers,
        dropout_in=args.decoder_dropout_in,
        dropout_out=args.decoder_dropout_out,
        attention=options.eval_bool(args.decoder_attention),
        encoder_embed_dim=args.encoder_embed_dim,
        encoder_output_units=encoder.output_units,
        pretrained_embed=pretrained_decoder_embed,
    )
    return cls(encoder, decoder)
Example 14: build_model

# Required import: from fairseq import options [as alias]
# Or: from fairseq.options import eval_bool [as alias]
def build_model(cls, args, task):
    """Build a new model instance."""
    # make sure all arguments are present in older models
    base_architecture(args)

    if getattr(args, 'max_target_positions', None) is not None:
        max_target_positions = args.max_target_positions
    else:
        max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)

    def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        embed_dict = utils.parse_embedding(embed_path)
        utils.print_embed_overlap(embed_dict, dictionary)
        return utils.load_embedding(embed_dict, dictionary, embed_tokens)

    pretrained_decoder_embed = None
    if args.decoder_embed_path:
        pretrained_decoder_embed = load_pretrained_embedding_from_file(
            args.decoder_embed_path,
            task.target_dictionary,
            args.decoder_embed_dim
        )

    if args.share_decoder_input_output_embed:
        # double check all parameter combinations are valid
        if task.source_dictionary != task.target_dictionary:
            raise ValueError('--share-decoder-input-output-embeddings requires a joint dictionary')
        if args.decoder_embed_dim != args.decoder_out_embed_dim:
            raise ValueError(
                '--share-decoder-input-output-embeddings requires '
                '--decoder-embed-dim to match --decoder-out-embed-dim'
            )

    decoder = LSTMDecoder(
        dictionary=task.dictionary,
        embed_dim=args.decoder_embed_dim,
        hidden_size=args.decoder_hidden_size,
        out_embed_dim=args.decoder_out_embed_dim,
        num_layers=args.decoder_layers,
        dropout_in=args.decoder_dropout_in,
        dropout_out=args.decoder_dropout_out,
        attention=options.eval_bool(args.decoder_attention),
        encoder_output_units=0,
        pretrained_embed=pretrained_decoder_embed,
        share_input_output_embed=args.share_decoder_input_output_embed,
        adaptive_softmax_cutoff=(
            options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
            if args.criterion == 'adaptive_loss' else None
        ),
        max_target_positions=max_target_positions
    )
    return cls(decoder)