This article collects typical usage examples of the Python method fairseq.utils.import_user_module. If you are unsure what utils.import_user_module does or how to call it in practice, the curated code examples below may help. You can also explore further usage examples from the containing module, fairseq.utils.
The following shows 3 code examples of the utils.import_user_module method, ordered by popularity by default.
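All three examples pass the method an object that carries a user_dir attribute pointing at a directory of custom fairseq extensions. As a minimal, hedged sketch of that basic call pattern (the plugin path is a hypothetical placeholder):

import argparse
from fairseq import utils

# Hypothetical directory containing custom fairseq tasks/models/criteria.
args = argparse.Namespace(user_dir='/path/to/my_plugins')

# Importing the user directory executes its registration decorators,
# making the custom components available to fairseq.
utils.import_user_module(args)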
Example 1: load_model
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
# This snippet also uses: import os, import json, and from fairseq import checkpoint_utils, tasks
def load_model(self, args):
    args.user_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    utils.import_user_module(args)

    filename = args.model_path
    if not os.path.exists(filename):
        raise IOError("Model file not found: {}".format(filename))

    state = checkpoint_utils.load_checkpoint_to_cpu(filename, json.loads(args.model_overrides))
    saved_args = state["args"]
    saved_args.data = args.data_bin
    task = tasks.setup_task(saved_args)

    # build model for ensemble
    self.model = task.build_model(saved_args)
    self.model.load_state_dict(state["model"], strict=True)

    # Set dictionary
    self.load_dictionary(task)
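For context, load_model reads a few attributes from args besides user_dir (which it sets itself). A hedged sketch of the namespace a caller might build, with hypothetical placeholder values and attribute names taken from the accesses above:

import argparse

# Hypothetical caller-side namespace for load_model.
args = argparse.Namespace(
    model_path='/path/to/checkpoint.pt',  # checkpoint file loaded to CPU
    model_overrides='{}',                 # JSON string merged into the saved args
    data_bin='/path/to/data-bin',         # binarized data directory assigned to saved_args.data
)
# some_loader.load_model(args)  # hypothetical object owning the method above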
Example 2: _initialize_fairseq
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
# This snippet also uses: import logging, and imports fairseq.utils as fairseq_utils
FAIRSEQ_INITIALIZED = False  # module-level guard so the setup runs only once


def _initialize_fairseq(user_dir):
    global FAIRSEQ_INITIALIZED
    if not FAIRSEQ_INITIALIZED:
        logging.info("Setting up fairseq library...")
        if user_dir:
            # A throwaway namespace object; import_user_module only reads user_dir.
            args = type("", (), {"user_dir": user_dir})()
            fairseq_utils.import_user_module(args)
        FAIRSEQ_INITIALIZED = True
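The type("", (), {"user_dir": user_dir})() expression builds an anonymous object whose only attribute is user_dir, which is all import_user_module looks at. A standard-library SimpleNamespace (or argparse.Namespace, as in Example 3) does the same job; a minimal equivalent sketch with a placeholder path:

import types
from fairseq import utils as fairseq_utils

# Equivalent to the anonymous-class trick above: only the user_dir attribute matters.
fairseq_utils.import_user_module(types.SimpleNamespace(user_dir='/path/to/user_dir'))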
Example 3: from_pretrained
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
# This snippet also uses: import os, import argparse
def from_pretrained(
        model_name_or_path,
        checkpoint_file='model.pt',
        data_name_or_path='.',
        archive_map=None,
        **kwargs
):
    from fairseq import checkpoint_utils, file_utils

    if archive_map is not None:
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]

        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for k, v in model_name_or_path.items():
                if k == 'checkpoint_file':
                    checkpoint_file = v
                elif (
                    k != 'path'
                    # only set kwargs that don't already have overrides
                    and k not in kwargs
                ):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path['path']

    model_path = file_utils.load_archive_file(model_name_or_path)

    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    for file, arg in {
        'code': 'bpe_codes',
        'bpecodes': 'bpe_codes',
        'sentencepiece.bpe.model': 'sentencepiece_vocab',
    }.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path

    if 'user_dir' in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))

    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
        arg_overrides=kwargs,
    )

    return {
        'args': args,
        'task': task,
        'models': models,
    }
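A hedged usage sketch of the helper above; the paths are placeholders, and passing user_dir is what routes the call through utils.import_user_module:

# Hypothetical call; the model directory layout follows the conventions in the code above.
loaded = from_pretrained(
    '/path/to/model_dir',
    checkpoint_file='model.pt',
    data_name_or_path='.',
    user_dir='/path/to/my_plugins',  # placeholder; triggers import_user_module
)
models, args, task = loaded['models'], loaded['args'], loaded['task']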