This article collects typical usage examples of the Python method allennlp.modules.InputVariationalDropout. If you are wondering what modules.InputVariationalDropout does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also explore further usage examples from its containing module, allennlp.modules.
The following shows 4 code examples of the modules.InputVariationalDropout method, drawn from open-source projects and sorted by popularity by default.
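Before the examples, a minimal sketch (not taken from any of the projects below) of what InputVariationalDropout does: unlike torch.nn.Dropout, which samples an independent mask for every element, it samples one dropout mask per sequence and reuses it at every timestep, following the variational RNN dropout of Gal and Ghahramani.

import torch
from allennlp.modules import InputVariationalDropout

dropout = InputVariationalDropout(p=0.5)
dropout.train()  # dropout is a no-op in eval mode

x = torch.ones(2, 4, 3)  # (batch_size=2, num_timesteps=4, embedding_dim=3)
y = dropout(x)

# The same features are zeroed (and survivors scaled by 1 / (1 - p))
# at every timestep of a given sequence:
print(y[0, 0])  # e.g. tensor([2., 0., 2.])
print(y[0, 1])  # identical pattern to y[0, 0]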
Example 1: __init__
# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import InputVariationalDropout [as alias]
def __init__(self,
             encoder: Seq2SeqEncoder,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             similarity_function: SimilarityFunction = None,
             dropout: float = 0.5) -> None:
    super().__init__()
    self._encoder = encoder
    self._matrix_attention = LegacyMatrixAttention(similarity_function)
    self._projection_feedforward = projection_feedforward
    self._inference_encoder = inference_encoder
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        # Variational dropout applied to the inputs of the recurrent encoders.
        self.rnn_input_dropout = InputVariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None
    self._output_feedforward = output_feedforward
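This constructor mirrors AllenNLP's ESIM model, where rnn_input_dropout is applied to the embedded token sequences immediately before each recurrent encoder in the forward pass (not shown in this excerpt). A self-contained sketch of that step, using a stand-in LSTM encoder and made-up dimensions:

import torch
from allennlp.modules import InputVariationalDropout
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper

# Stand-ins for the configured modules; all sizes are illustrative.
encoder = PytorchSeq2SeqWrapper(torch.nn.LSTM(300, 300, batch_first=True))
rnn_input_dropout = InputVariationalDropout(0.5)

embedded_premise = torch.randn(2, 10, 300)          # (batch, premise_len, emb_dim)
premise_mask = torch.ones(2, 10, dtype=torch.bool)  # no padding in this toy batch

# The variational dropout mask is sampled once per sequence, so the encoder
# sees a consistent set of surviving features across all timesteps.
encoded_premise = encoder(rnn_input_dropout(embedded_premise), premise_mask)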
Example 2: __init__
# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import InputVariationalDropout [as alias]
def __init__(self,
             vocab: Vocabulary,
             span_encoder: Seq2SeqEncoder,
             reasoning_encoder: Seq2SeqEncoder,
             input_dropout: float = 0.3,
             hidden_dim_maxpool: int = 1024,
             class_embs: bool = True,
             reasoning_use_obj: bool = True,
             reasoning_use_answer: bool = True,
             reasoning_use_question: bool = True,
             pool_reasoning: bool = True,
             pool_answer: bool = True,
             pool_question: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(HGL_Model, self).__init__(vocab)
    self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=class_embs, final_dim=512)
    ###################################################################################################
    # TimeDistributed folds the per-answer dimension into the batch before applying variational dropout.
    self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
    self.span_encoder = TimeDistributed(span_encoder)
    self.reasoning_encoder = TimeDistributed(reasoning_encoder)
    self.Graph_reasoning = Graph_reasoning(512)
    # Bilinear attention between question/answer spans (QAHG) and between spans and visual objects (VAHG).
    self.QAHG = BilinearMatrixAttention(
        matrix_1_dim=span_encoder.get_output_dim(),
        matrix_2_dim=span_encoder.get_output_dim(),
    )
    self.VAHG = BilinearMatrixAttention(
        matrix_1_dim=span_encoder.get_output_dim(),
        matrix_2_dim=self.detector.final_dim,
    )
    self.reasoning_use_obj = reasoning_use_obj
    self.reasoning_use_answer = reasoning_use_answer
    self.reasoning_use_question = reasoning_use_question
    self.pool_reasoning = pool_reasoning
    self.pool_answer = pool_answer
    self.pool_question = pool_question
    # Input width of the final MLP: sum of the dims of whichever representations get pooled.
    dim = sum([d for d, to_pool in [(reasoning_encoder.get_output_dim(), self.pool_reasoning),
                                    (span_encoder.get_output_dim(), self.pool_answer),
                                    (span_encoder.get_output_dim(), self.pool_question)] if to_pool])
    self.final_mlp = torch.nn.Sequential(
        torch.nn.Dropout(input_dropout, inplace=False),
        torch.nn.Linear(dim, hidden_dim_maxpool),
        torch.nn.ReLU(inplace=True),
        torch.nn.Dropout(input_dropout, inplace=False),
        torch.nn.Linear(hidden_dim_maxpool, 1),
    )
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
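Unlike Example 1, this model wraps InputVariationalDropout in TimeDistributed, because VCR-style span tensors carry an extra answer-choice dimension. A small sketch of what the wrapper does (all shapes are made up):

import torch
from allennlp.modules import InputVariationalDropout, TimeDistributed

rnn_input_dropout = TimeDistributed(InputVariationalDropout(0.3))
rnn_input_dropout.train()

# (batch, num_answers, seq_len, dim): TimeDistributed folds num_answers into
# the batch, applies InputVariationalDropout to the resulting 3-D tensor,
# and unfolds the output back to 4-D.
spans = torch.randn(2, 4, 15, 512)
dropped = rnn_input_dropout(spans)
assert dropped.shape == spans.shape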
Example 3: __init__
# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import InputVariationalDropout [as alias]
def __init__(self,
             vocab: Vocabulary,
             text_field_embedder: TextFieldEmbedder,
             encoder: Seq2SeqEncoder,
             similarity_function: SimilarityFunction,
             projection_feedforward: FeedForward,
             inference_encoder: Seq2SeqEncoder,
             output_feedforward: FeedForward,
             output_logit: FeedForward,
             dropout: float = 0.5,
             initializer: InitializerApplicator = InitializerApplicator(),
             regularizer: RegularizerApplicator = None) -> None:
    super(ESIM, self).__init__(vocab, regularizer)
    self._text_field_embedder = text_field_embedder
    self._encoder = encoder
    self._matrix_attention = LegacyMatrixAttention(similarity_function)
    self._projection_feedforward = projection_feedforward
    self._inference_encoder = inference_encoder
    if dropout:
        self.dropout = torch.nn.Dropout(dropout)
        # Variational dropout for the inputs of the recurrent encoders.
        self.rnn_input_dropout = InputVariationalDropout(dropout)
    else:
        self.dropout = None
        self.rnn_input_dropout = None
    self._output_feedforward = output_feedforward
    self._output_logit = output_logit
    self._num_labels = vocab.get_vocab_size(namespace="labels")
    # Sanity-check that the configured module dimensions line up.
    check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),
                           "text field embedding dim", "encoder input dim")
    check_dimensions_match(encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(),
                           "encoder output dim", "projection feedforward input")
    check_dimensions_match(projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(),
                           "proj feedforward output dim", "inference lstm input dim")
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
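The check that projection_feedforward accepts encoder.get_output_dim() * 4 reflects ESIM's enhancement step: each encoded sequence is concatenated with its attended counterpart, their difference, and their elementwise product before projection. A sketch with dummy tensors (all shapes are made up):

import torch

batch, length, dim = 2, 7, 300                         # illustrative sizes
encoded_premise = torch.randn(batch, length, dim)      # from self._encoder
attended_hypothesis = torch.randn(batch, length, dim)  # from the attention step

premise_enhanced = torch.cat(
    [encoded_premise,
     attended_hypothesis,
     encoded_premise - attended_hypothesis,
     encoded_premise * attended_hypothesis],
    dim=-1,
)
assert premise_enhanced.size(-1) == 4 * dim  # what projection_feedforward must accept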
Example 4: __init__
# Required import: from allennlp import modules [as alias]
# Or: from allennlp.modules import InputVariationalDropout [as alias]
def __init__(self,
             vocab: Vocabulary,
             span_encoder: Seq2SeqEncoder,
             reasoning_encoder: Seq2SeqEncoder,
             input_dropout: float = 0.3,
             hidden_dim_maxpool: int = 1024,
             class_embs: bool = True,
             reasoning_use_obj: bool = True,
             reasoning_use_answer: bool = True,
             reasoning_use_question: bool = True,
             pool_reasoning: bool = True,
             pool_answer: bool = True,
             pool_question: bool = False,
             initializer: InitializerApplicator = InitializerApplicator(),
             ):
    super(AttentionQA, self).__init__(vocab)
    self.detector = SimpleDetector(pretrained=True, average_pool=True, semantic=class_embs, final_dim=512)
    ###################################################################################################
    # As in Example 2: fold the per-answer dimension into the batch before variational dropout.
    self.rnn_input_dropout = TimeDistributed(InputVariationalDropout(input_dropout)) if input_dropout > 0 else None
    self.span_encoder = TimeDistributed(span_encoder)
    self.reasoning_encoder = TimeDistributed(reasoning_encoder)
    # Bilinear attention between spans (question vs. answer) and between spans and detected objects.
    self.span_attention = BilinearMatrixAttention(
        matrix_1_dim=span_encoder.get_output_dim(),
        matrix_2_dim=span_encoder.get_output_dim(),
    )
    self.obj_attention = BilinearMatrixAttention(
        matrix_1_dim=span_encoder.get_output_dim(),
        matrix_2_dim=self.detector.final_dim,
    )
    self.reasoning_use_obj = reasoning_use_obj
    self.reasoning_use_answer = reasoning_use_answer
    self.reasoning_use_question = reasoning_use_question
    self.pool_reasoning = pool_reasoning
    self.pool_answer = pool_answer
    self.pool_question = pool_question
    # Input width of the final MLP: sum of the dims of whichever representations get pooled.
    dim = sum([d for d, to_pool in [(reasoning_encoder.get_output_dim(), self.pool_reasoning),
                                    (span_encoder.get_output_dim(), self.pool_answer),
                                    (span_encoder.get_output_dim(), self.pool_question)] if to_pool])
    self.final_mlp = torch.nn.Sequential(
        torch.nn.Dropout(input_dropout, inplace=False),
        torch.nn.Linear(dim, hidden_dim_maxpool),
        torch.nn.ReLU(inplace=True),
        torch.nn.Dropout(input_dropout, inplace=False),
        torch.nn.Linear(hidden_dim_maxpool, 1),
    )
    self._accuracy = CategoricalAccuracy()
    self._loss = torch.nn.CrossEntropyLoss()
    initializer(self)
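As in Example 2, final_mlp turns pooled per-answer features into one logit per answer choice. A sketch of how such a head is typically applied, with made-up shapes:

import torch

dim, hidden_dim_maxpool = 1024, 1024  # illustrative sizes
final_mlp = torch.nn.Sequential(
    torch.nn.Dropout(0.3),
    torch.nn.Linear(dim, hidden_dim_maxpool),
    torch.nn.ReLU(inplace=True),
    torch.nn.Dropout(0.3),
    torch.nn.Linear(hidden_dim_maxpool, 1),
)

pooled = torch.randn(2, 4, dim)         # (batch, num_answers, pooled dim)
logits = final_mlp(pooled).squeeze(-1)  # (batch, num_answers)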