This article collects typical usage examples of the Python method fairseq.utils.import_user_module. If you are unsure exactly how to call utils.import_user_module, or are looking for concrete examples of it in use, the curated code samples below may help. You can also explore further usage examples from the module the method belongs to, fairseq.utils.
Three code examples of utils.import_user_module are shown below, sorted by popularity by default.
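For orientation before the examples: import_user_module expects a namespace-like object whose user_dir attribute points at a directory of custom fairseq extensions (tasks, models, criteria, etc.) and imports that directory so the extensions get registered. A minimal sketch, assuming a placeholder plugin path:

import argparse
from fairseq import utils

# Hypothetical plugin directory containing custom fairseq tasks/models.
args = argparse.Namespace(user_dir='/path/to/my_fairseq_plugins')
utils.import_user_module(args)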
Example 1: load_model
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
# This snippet also relies on os, json, and fairseq's checkpoint_utils / tasks modules.
def load_model(self, args):
    # Register the project's custom fairseq extensions before loading the checkpoint.
    args.user_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    utils.import_user_module(args)

    filename = args.model_path
    if not os.path.exists(filename):
        raise IOError("Model file not found: {}".format(filename))

    state = checkpoint_utils.load_checkpoint_to_cpu(filename, json.loads(args.model_overrides))
    saved_args = state["args"]
    saved_args.data = args.data_bin
    task = tasks.setup_task(saved_args)

    # build model for ensemble
    self.model = task.build_model(saved_args)
    self.model.load_state_dict(state["model"], strict=True)

    # Set dictionary
    self.load_dictionary(task)
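The method above reads model_path, data_bin, and model_overrides off the args namespace. A minimal sketch of that namespace, assuming placeholder paths (only the attribute names come from the method itself):

import argparse

args = argparse.Namespace(
    model_path='/path/to/checkpoint.pt',   # hypothetical checkpoint path
    data_bin='/path/to/data-bin',          # hypothetical preprocessed data directory
    model_overrides='{}',                  # JSON string, parsed with json.loads in load_model()
)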
Example 2: _initialize_fairseq
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
import logging

from fairseq import utils as fairseq_utils

FAIRSEQ_INITIALIZED = False  # module-level guard so user modules are only imported once

def _initialize_fairseq(user_dir):
    global FAIRSEQ_INITIALIZED
    if not FAIRSEQ_INITIALIZED:
        logging.info("Setting up fairseq library...")
        if user_dir:
            # import_user_module() only reads args.user_dir, so a throwaway object suffices.
            args = type("", (), {"user_dir": user_dir})()
            fairseq_utils.import_user_module(args)
        FAIRSEQ_INITIALIZED = True
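The type("", (), {...})() expression simply builds a throwaway object carrying a user_dir attribute, which is all import_user_module reads; argparse.Namespace is an equivalent, more conventional choice. A sketch of that alternative (the directory path is a placeholder):

import argparse
from fairseq import utils as fairseq_utils

# Hypothetical plugin directory.
fairseq_utils.import_user_module(argparse.Namespace(user_dir='/path/to/my_fairseq_plugins'))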
Example 3: from_pretrained
# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import import_user_module [as alias]
# This snippet also relies on os and argparse, in addition to fairseq's utils module.
def from_pretrained(
    model_name_or_path,
    checkpoint_file='model.pt',
    data_name_or_path='.',
    archive_map=None,
    **kwargs
):
    from fairseq import checkpoint_utils, file_utils

    if archive_map is not None:
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]

        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for k, v in model_name_or_path.items():
                if k == 'checkpoint_file':
                    checkpoint_file = v
                elif (
                    k != 'path'
                    # only set kwargs that don't already have overrides
                    and k not in kwargs
                ):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path['path']

    model_path = file_utils.load_archive_file(model_name_or_path)

    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    for file, arg in {
        'code': 'bpe_codes',
        'bpecodes': 'bpe_codes',
        'sentencepiece.bpe.model': 'sentencepiece_vocab',
    }.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path

    # Register any custom user modules before loading the checkpoint(s).
    if 'user_dir' in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))

    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
        arg_overrides=kwargs,
    )

    return {
        'args': args,
        'task': task,
        'models': models,
    }
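For completeness, a hedged usage sketch of the helper above (the archive path and plugin directory are placeholders, and the call only succeeds if the archive actually contains the checkpoint and its data):

# Assumes from_pretrained as defined above is importable in the current module.
loaded = from_pretrained(
    '/path/to/model_archive',                 # hypothetical local archive or hub model name
    checkpoint_file='model.pt',
    data_name_or_path='.',
    user_dir='/path/to/my_fairseq_plugins',   # hypothetical; triggers import_user_module
)
models, task, args = loaded['models'], loaded['task'], loaded['args']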