This article collects typical usage examples of the Python method allennlp.data.Vocabulary.from_files. If you are wondering how to call Vocabulary.from_files, what it does, or what it looks like in real code, the curated examples below should help. You can also look further into usage examples of its containing class, allennlp.data.Vocabulary.
Seven code examples of the Vocabulary.from_files method are shown below, sorted by popularity by default.
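Before the examples, here is a minimal sketch of the typical save/load round trip. It only uses the public AllenNLP Vocabulary API; the directory path is hypothetical and purely illustrative.

# A minimal sketch of the round trip (hypothetical path).
from allennlp.data import Vocabulary

vocab = Vocabulary()
vocab.add_token_to_namespace("hello", namespace="tokens")
vocab.save_to_files("/tmp/example_vocabulary")  # writes one text file per namespace

# Later (or in another process), restore the same token/index mapping from disk.
loaded = Vocabulary.from_files("/tmp/example_vocabulary")
assert loaded.get_token_index("hello", namespace="tokens") == vocab.get_token_index("hello", namespace="tokens")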
Example 1: __init__
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def __init__(
    self,
    config: Config,
    models: Dict[str, Type[nn.Module]],
    gpu_ids: List[int] = [0],
    cpu_workers: int = 0,
):
    self._C = config

    if self._C.PHASE != "program_prior":
        raise ValueError(
            f"Trying to initialize a ProgramPriorEvaluator, expected config PHASE to be "
            f"program_prior, found {self._C.PHASE}"
        )

    # Initialize vocabulary, dataloader and model.
    self._vocabulary = Vocabulary.from_files(self._C.DATA.VOCABULARY)

    dataset = ProgramPriorDataset(self._C.DATA.VAL_TOKENS)
    dataloader = DataLoader(dataset, batch_size=self._C.OPTIM.BATCH_SIZE)

    super().__init__(config=config, dataloader=dataloader, models=models, gpu_ids=gpu_ids)

    # This will be a part of `self._models`, keep this handle for convenience.
    self._program_prior = self._models["program_prior"]
Example 2: _load
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def _load(cls, config, serialization_dir, weights_file=None, cuda_device=-1):
    """
    Instantiates an already-trained model, based on the experiment
    configuration and some optional overrides.
    """
    weights_file = weights_file or os.path.join(serialization_dir, _DEFAULT_WEIGHTS)

    # Load vocabulary from file.
    vocab_dir = os.path.join(serialization_dir, 'vocabulary')
    vocab = Vocabulary.from_files(vocab_dir)

    model_params = config.get('model')

    # The experiment config tells us how to _train_ a model, including where to get pre-trained
    # embeddings from. We're now _loading_ the model, so those embeddings will already be
    # stored in our weights. We don't need any pretrained weight file anymore, and we don't
    # want the code to look for it, so we remove it from the parameters here.
    remove_pretrained_embedding_params(model_params)
    model = Model.from_params(vocab=vocab, params=model_params)

    model_state = torch.load(weights_file, map_location=util.device_mapping(cuda_device))
    model.load_state_dict(model_state)

    # Force model to cpu or gpu, as appropriate, to make sure that the embeddings are
    # in sync with the weights.
    if cuda_device >= 0:
        model.cuda(cuda_device)
    else:
        model.cpu()

    return model
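For context, a hedged sketch of how a loader like the one above might be invoked, assuming an AllenNLP-style serialization directory (a config.json, a weights file, and a vocabulary/ subdirectory). The directory path and the concrete Model class are placeholders, not part of the example above.

from allennlp.common import Params

# Hypothetical serialization directory produced by a previous training run.
config = Params.from_file("runs/experiment/config.json")
model = Model._load(config, serialization_dir="runs/experiment", cuda_device=-1)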
Example 3: from_config
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def from_config(cls, config: Config):
    r"""Instantiate this class directly from a :class:`~probnmn.config.Config`."""
    _C = config
    return cls(
        vocabulary=Vocabulary.from_files(_C.DATA.VOCABULARY),
        input_size=_C.QUESTION_RECONSTRUCTOR.INPUT_SIZE,
        hidden_size=_C.QUESTION_RECONSTRUCTOR.HIDDEN_SIZE,
        num_layers=_C.QUESTION_RECONSTRUCTOR.NUM_LAYERS,
        dropout=_C.QUESTION_RECONSTRUCTOR.DROPOUT,
    )
Example 4: from_config
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def from_config(cls, config: Config):
    r"""Instantiate this class directly from a :class:`~probnmn.config.Config`."""
    _C = config
    return cls(
        vocabulary=Vocabulary.from_files(_C.DATA.VOCABULARY),
        input_size=_C.PROGRAM_GENERATOR.INPUT_SIZE,
        hidden_size=_C.PROGRAM_GENERATOR.HIDDEN_SIZE,
        num_layers=_C.PROGRAM_GENERATOR.NUM_LAYERS,
        dropout=_C.PROGRAM_GENERATOR.DROPOUT,
    )
Example 5: from_config
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def from_config(cls, config: Config):
    r"""Instantiate this class directly from a :class:`~probnmn.config.Config`."""
    _C = config
    return cls(  # type: ignore
        vocabulary=Vocabulary.from_files(_C.DATA.VOCABULARY),
        image_feature_size=tuple(_C.NMN.IMAGE_FEATURE_SIZE),
        module_channels=_C.NMN.MODULE_CHANNELS,
        class_projection_channels=_C.NMN.CLASS_PROJECTION_CHANNELS,
        classifier_linear_size=_C.NMN.CLASSIFIER_LINEAR_SIZE,
    )
Example 6: from_config
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def from_config(cls, config: Config):
    r"""Instantiate this class directly from a :class:`~probnmn.config.Config`."""
    _C = config
    return cls(
        vocabulary=Vocabulary.from_files(_C.DATA.VOCABULARY),
        input_size=_C.PROGRAM_PRIOR.INPUT_SIZE,
        hidden_size=_C.PROGRAM_PRIOR.HIDDEN_SIZE,
        num_layers=_C.PROGRAM_PRIOR.NUM_LAYERS,
        dropout=_C.PROGRAM_PRIOR.DROPOUT,
    )
Example 7: __init__
# Required import: from allennlp.data import Vocabulary [as alias]
# Or: from allennlp.data.Vocabulary import from_files [as alias]
def __init__(
    self,
    config: Config,
    models: Dict[str, Type[nn.Module]],
    gpu_ids: List[int] = [0],
    cpu_workers: int = 0,
):
    self._C = config

    if self._C.PHASE != "joint_training":
        raise ValueError(
            f"Trying to initialize a JointTrainingEvaluator, expected config PHASE to be "
            f"joint_training, found {self._C.PHASE}"
        )

    # Initialize vocabulary, dataloader and model.
    self._vocabulary = Vocabulary.from_files(self._C.DATA.VOCABULARY)

    # There is no notion of "supervision" during evaluation.
    dataset = JointTrainingDataset(self._C.DATA.VAL_TOKENS, self._C.DATA.VAL_FEATURES)
    dataloader = DataLoader(
        dataset, batch_size=self._C.OPTIM.BATCH_SIZE, num_workers=cpu_workers
    )

    super().__init__(config=config, dataloader=dataloader, models=models, gpu_ids=gpu_ids)

    # These will be a part of `self._models`, keep these handles for convenience.
    self._program_generator = self._models["program_generator"]
    self._nmn = self._models["nmn"]