This article collects typical usage examples of the Python method allennlp.modules.time_distributed.TimeDistributed. If you are wondering what time_distributed.TimeDistributed does or how to use it, the curated code examples below may help; you can also explore the containing module, allennlp.modules.time_distributed, for further usage examples.
The following presents 15 code examples of the time_distributed.TimeDistributed method.
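Before the examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of what TimeDistributed itself does: it merges the batch and time dimensions of its inputs, applies the wrapped module, and then restores the original leading dimensions.

import torch
from allennlp.modules.time_distributed import TimeDistributed

linear = torch.nn.Linear(in_features=8, out_features=4)
time_distributed_linear = TimeDistributed(linear)

# (batch_size=2, timesteps=5, features=8) -> (batch_size=2, timesteps=5, features=4)
inputs = torch.randn(2, 5, 8)
outputs = time_distributed_linear(inputs)
print(outputs.shape)  # torch.Size([2, 5, 4])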
Example 1: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self, elmo_tokens: torch.Tensor, word_inputs: torch.Tensor = None) -> torch.Tensor:
    """
    # Parameters

    elmo_tokens : `torch.Tensor`
        Shape `(batch_size, timesteps, 50)` of character ids representing the current batch.
    word_inputs : `torch.Tensor`, optional.
        If you passed a cached vocab, you can in addition pass a tensor of shape
        `(batch_size, timesteps)`, which represent word ids which have been pre-cached.

    # Returns

    `torch.Tensor`
        The ELMo representations for the input sequence, shape
        `(batch_size, timesteps, embedding_dim)`
    """
    elmo_output = self._elmo(elmo_tokens, word_inputs)
    elmo_representations = elmo_output["elmo_representations"][0]
    if self._projection:
        projection = self._projection
        for _ in range(elmo_representations.dim() - 2):
            projection = TimeDistributed(projection)
        elmo_representations = projection(elmo_representations)
    return elmo_representations
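The projection-wrapping loop above generalizes to inputs with more than one leading dimension. The following standalone sketch (illustrative shapes and sizes, not taken from the library) shows how wrapping the Linear once per extra dimension means the innermost module only ever sees a 2D tensor:

import torch
from allennlp.modules.time_distributed import TimeDistributed

representations = torch.randn(2, 3, 5, 32)    # e.g. (batch, d1, timesteps, dim)
projection = torch.nn.Linear(32, 16)
for _ in range(representations.dim() - 2):    # wraps twice for a 4-dimensional tensor
    projection = TimeDistributed(projection)
projected = projection(representations)
print(projected.shape)                        # torch.Size([2, 3, 5, 16])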
Example 2: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self, inputs):  # pylint: disable=arguments-differ
    original_inputs = inputs
    if original_inputs.dim() > 2:
        inputs = inputs.view(-1, inputs.size(-1))
    embedded = embedding(inputs, self.weight,
                         max_norm=self.max_norm,
                         norm_type=self.norm_type,
                         scale_grad_by_freq=self.scale_grad_by_freq,
                         sparse=self.sparse)
    if original_inputs.dim() > 2:
        view_args = list(original_inputs.size()) + [embedded.size(-1)]
        embedded = embedded.view(*view_args)
    if self._projection:
        projection = self._projection
        for _ in range(embedded.dim() - 2):
            projection = TimeDistributed(projection)
        embedded = projection(embedded)
    return embedded
# Custom logic requires custom from_params.
Example 3: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, input_dim: int) -> None:
    super().__init__()
    self._input_dim = input_dim
    self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
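A hedged sketch of how this _global_attention module is typically applied downstream (the sequence tensor and its shape are assumptions for illustration): the TimeDistributed Linear produces one unnormalized attention logit per token.

import torch
from allennlp.modules.time_distributed import TimeDistributed

input_dim = 16
global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))

sequence_tensor = torch.randn(2, 7, input_dim)          # (batch_size, seq_len, input_dim)
attention_logits = global_attention(sequence_tensor)    # (batch_size, seq_len, 1)
print(attention_logits.shape)                           # torch.Size([2, 7, 1])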
Example 4: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self, tokens: torch.Tensor) -> torch.Tensor:
    # tokens may have extra dimensions (batch_size, d1, ..., dn, sequence_length),
    # but embedding expects (batch_size, sequence_length), so pass tokens to
    # util.combine_initial_dims (which is a no-op if there are no extra dimensions).
    # Remember the original size.
    original_size = tokens.size()
    tokens = util.combine_initial_dims(tokens)
    embedded = embedding(
        tokens,
        self.weight,
        padding_idx=self.padding_index,
        max_norm=self.max_norm,
        norm_type=self.norm_type,
        scale_grad_by_freq=self.scale_grad_by_freq,
        sparse=self.sparse,
    )
    # Now (if necessary) add back in the extra dimensions.
    embedded = util.uncombine_initial_dims(embedded, original_size)
    if self._projection:
        projection = self._projection
        for _ in range(embedded.dim() - 2):
            projection = TimeDistributed(projection)
        embedded = projection(embedded)
    return embedded
Example 5: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, embedding: Embedding, encoder: Seq2VecEncoder, dropout: float = 0.0) -> None:
    super().__init__()
    self._embedding = TimeDistributed(embedding)
    self._encoder = TimeDistributed(encoder)
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x
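For context, a hedged sketch of the forward pass that usually accompanies this constructor in AllenNLP's TokenCharactersEncoder (reconstructed from memory, so treat the details as an approximation rather than the library's exact code): both the character embedding and the Seq2VecEncoder are TimeDistributed over the token dimension.

    def forward(self, token_characters: torch.Tensor) -> torch.Tensor:
        # token_characters: (batch_size, num_tokens, num_characters)
        mask = (token_characters != 0).long()
        return self._dropout(self._encoder(self._embedding(token_characters), mask))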
Example 6: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(
    self, text_field_input: TextFieldTensors, num_wrapping_dims: int = 0, **kwargs
) -> torch.Tensor:
    if self._token_embedders.keys() != text_field_input.keys():
        message = "Mismatched token keys: %s and %s" % (
            str(self._token_embedders.keys()),
            str(text_field_input.keys()),
        )
        raise ConfigurationError(message)

    embedded_representations = []
    for key in self._ordered_embedder_keys:
        # Note: need to use getattr here so that the pytorch voodoo
        # with submodules works with multiple GPUs.
        embedder = getattr(self, "token_embedder_{}".format(key))
        forward_params = inspect.signature(embedder.forward).parameters
        forward_params_values = {}
        missing_tensor_args = set()
        for param in forward_params.keys():
            if param in kwargs:
                forward_params_values[param] = kwargs[param]
            else:
                missing_tensor_args.add(param)

        for _ in range(num_wrapping_dims):
            embedder = TimeDistributed(embedder)

        tensors: Dict[str, torch.Tensor] = text_field_input[key]
        if len(tensors) == 1 and len(missing_tensor_args) == 1:
            # If there's only one tensor argument to the embedder, and we just have one tensor to
            # embed, we can just pass in that tensor, without requiring a name match.
            token_vectors = embedder(list(tensors.values())[0], **forward_params_values)
        else:
            # If there are multiple tensor arguments, we have to require matching names from the
            # TokenIndexer. I don't think there's an easy way around that.
            token_vectors = embedder(**tensors, **forward_params_values)
        if token_vectors is not None:
            # To handle some very rare use cases, we allow the return value of the embedder to
            # be None; we just skip it in that case.
            embedded_representations.append(token_vectors)
    return torch.cat(embedded_representations, dim=-1)
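The num_wrapping_dims argument above handles inputs that carry extra leading dimensions (for example, a ListField of TextFields). A toy illustration of the wrapping it performs (the embedder and shapes here are assumptions, not the example's real token embedders):

import torch
from allennlp.modules.time_distributed import TimeDistributed

embedder = torch.nn.Embedding(num_embeddings=100, embedding_dim=8)
token_ids = torch.randint(0, 100, (2, 3, 5))    # (batch_size, num_sentences, num_tokens)

num_wrapping_dims = 1                           # one extra dimension beyond (batch, tokens)
for _ in range(num_wrapping_dims):
    embedder = TimeDistributed(embedder)
embedded = embedder(token_ids)
print(embedded.shape)                           # torch.Size([2, 3, 5, 8])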
Example 7: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             phrase_probability: FeedForward,
             edge_probability: FeedForward,
             premise_encoder: Seq2SeqEncoder,
             edge_embedding: Embedding,
             use_encoding_for_node: bool,
             ignore_edges: bool,
             attention_similarity: SimilarityFunction,
             initializer: InitializerApplicator = InitializerApplicator()) -> None:
    super(TreeAttention, self).__init__(vocab)

    self._text_field_embedder = text_field_embedder
    self._premise_encoder = premise_encoder
    self._nodes_attention = SingleTimeDistributed(MatrixAttention(attention_similarity), 0)
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    self._phrase_probability = TimeDistributed(phrase_probability)
    self._ignore_edges = ignore_edges
    if not self._ignore_edges:
        self._num_edges = vocab.get_vocab_size(namespace="edges")
        self._edge_probability = TimeDistributed(edge_probability)
        self._edge_embedding = edge_embedding
    self._use_encoding_for_node = use_encoding_for_node
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
Example 8: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, embedding: Embedding, encoder: Seq2VecEncoder, dropout: float = 0.0) -> None:
    super(UdifyTokenCharactersEncoder, self).__init__()
    self._embedding = TimeDistributed(embedding)
    self._encoder = TimeDistributed(encoder)
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x
Example 9: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, input_dim):
    super(SelfAttentiveSpanExtractor, self).__init__()
    self._input_dim = input_dim
    self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
Example 10: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self,  # pylint: disable=arguments-differ
            inputs,
            word_inputs=None):
    u"""
    Parameters
    ----------
    inputs: ``torch.Tensor``
        Shape ``(batch_size, timesteps, 50)`` of character ids representing the current batch.
    word_inputs : ``torch.Tensor``, optional.
        If you passed a cached vocab, you can in addition pass a tensor of shape
        ``(batch_size, timesteps)``, which represent word ids which have been pre-cached.

    Returns
    -------
    The ELMo representations for the input sequence, shape
    ``(batch_size, timesteps, embedding_dim)``
    """
    elmo_output = self._elmo(inputs, word_inputs)
    elmo_representations = elmo_output[u'elmo_representations'][0]
    if self._projection:
        projection = self._projection
        for _ in range(elmo_representations.dim() - 2):
            projection = TimeDistributed(projection)
        elmo_representations = projection(elmo_representations)
    return elmo_representations
# Custom vocab_to_cache logic requires a from_params implementation.
Example 11: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, embedding, encoder, dropout=0.0):
    super(TokenCharactersEncoder, self).__init__()
    self._embedding = TimeDistributed(embedding)
    self._encoder = TimeDistributed(encoder)
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x
Example 12: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self, text_field_input, num_wrapping_dims=0):
    if list(self._token_embedders.keys()) != list(text_field_input.keys()):
        if not self._allow_unmatched_keys:
            message = u"Mismatched token keys: %s and %s" % (unicode(list(self._token_embedders.keys())),
                                                             unicode(list(text_field_input.keys())))
            raise ConfigurationError(message)

    embedded_representations = []
    keys = sorted(self._token_embedders.keys())
    for key in keys:
        # If we pre-specified a mapping explicitly, use that.
        if self._embedder_to_indexer_map is not None:
            tensors = [text_field_input[indexer_key] for
                       indexer_key in self._embedder_to_indexer_map[key]]
        else:
            # otherwise, we assume the mapping between indexers and embedders
            # is bijective and just use the key directly.
            tensors = [text_field_input[key]]
        # Note: need to use getattr here so that the pytorch voodoo
        # with submodules works with multiple GPUs.
        embedder = getattr(self, u'token_embedder_{}'.format(key))
        for _ in range(num_wrapping_dims):
            embedder = TimeDistributed(embedder)
        token_vectors = embedder(*tensors)
        embedded_representations.append(token_vectors)
    return torch.cat(embedded_representations, dim=-1)
# This is some unusual logic, it needs a custom from_params.
Example 13: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self, text_field_input: Dict[str, torch.Tensor], num_wrapping_dims: int = 0) -> torch.Tensor:
    if self._token_embedders.keys() != text_field_input.keys():
        if not self._allow_unmatched_keys:
            message = "Mismatched token keys: %s and %s" % (str(self._token_embedders.keys()),
                                                            str(text_field_input.keys()))
            raise ConfigurationError(message)

    embedded_representations = []
    keys = sorted(self._token_embedders.keys())
    for key in keys:
        # If we pre-specified a mapping explicitly, use that.
        if self._embedder_to_indexer_map is not None:
            tensors = [text_field_input[indexer_key] for
                       indexer_key in self._embedder_to_indexer_map[key]]
        else:
            # otherwise, we assume the mapping between indexers and embedders
            # is bijective and just use the key directly.
            tensors = [text_field_input[key]]
        # Note: need to use getattr here so that the pytorch voodoo
        # with submodules works with multiple GPUs.
        embedder = getattr(self, 'token_embedder_{}'.format(key))
        for _ in range(num_wrapping_dims):
            embedder = TimeDistributed(embedder)
        token_vectors = embedder(*tensors)
        embedded_representations.append(token_vectors)
    return torch.cat(embedded_representations, dim=-1)
# This is some unusual logic, it needs a custom from_params.
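For readers unfamiliar with _embedder_to_indexer_map in Examples 12 and 13: it lets a single embedder consume the tensors produced by several indexer keys, in a fixed order. The mapping below is purely hypothetical (the key names are made up for illustration):

embedder_to_indexer_map = {
    "elmo": ["elmo_characters", "elmo_word_ids"],   # this embedder receives two tensors, in this order
    "tokens": ["tokens"],                           # ordinary one-to-one mapping
}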
Example 14: forward
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def forward(self,  # pylint: disable=arguments-differ
            inputs: torch.Tensor) -> torch.Tensor:
    """
    Parameters
    ----------
    inputs: ``torch.Tensor``
        Shape ``(batch_size, timesteps)`` of character ids representing the current batch.

    Returns
    -------
    The VAMPIRE representations for the input sequence, shape
    ``(batch_size, timesteps, embedding_dim)`` or ``(batch_size, timesteps)``
    depending on whether expand_dim is set to True.
    """
    vae_output = self._vae(inputs)
    embedded = vae_output['vae_representation']
    self._layers = vae_output['layers']
    if self._expand_dim:
        embedded = (embedded.unsqueeze(0)
                    .expand(inputs.shape[1], inputs.shape[0], -1)
                    .permute(1, 0, 2).contiguous())
    if self._projection:
        projection = self._projection
        for _ in range(embedded.dim() - 2):
            projection = TimeDistributed(projection)
        embedded = projection(embedded)
    return embedded
# Custom vocab_to_cache logic requires a from_params implementation.
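The expand_dim branch in Example 14 broadcasts a single per-document representation to every timestep. A small standalone check of that reshape (the concrete sizes are assumptions for illustration):

import torch

batch_size, timesteps, dim = 2, 5, 4
inputs = torch.randint(0, 10, (batch_size, timesteps))
embedded = torch.randn(batch_size, dim)              # one vector per document

expanded = (embedded.unsqueeze(0)
            .expand(inputs.shape[1], inputs.shape[0], -1)
            .permute(1, 0, 2).contiguous())
print(expanded.shape)                                # torch.Size([2, 5, 4])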
Example 15: __init__
# Required imports: from allennlp.modules import time_distributed [as alias]
# Or: from allennlp.modules.time_distributed import TimeDistributed [as alias]
def __init__(self, embedding: Embedding, encoder: Seq2VecEncoder, projection_dim: int = None,
             dropout: float = 0.0) -> None:
    super(TokenCharactersEncoder, self).__init__()
    self._embedding = TimeDistributed(embedding)
    self._encoder = TimeDistributed(encoder)
    self.output_dim = projection_dim or self._encoder._module.get_output_dim()
    if projection_dim:
        self._projection = torch.nn.Linear(self._encoder._module.get_output_dim(), projection_dim)
    else:
        self._projection = lambda x: x
    if dropout > 0:
        self._dropout = torch.nn.Dropout(p=dropout)
    else:
        self._dropout = lambda x: x