This page collects typical usage examples of the Python method allennlp.modules.text_field_embedders.TextFieldEmbedder. If you have been wondering what text_field_embedders.TextFieldEmbedder does, how to use it, or what it looks like in practice, the hand-picked examples below should help. You can also explore the containing module, allennlp.modules.text_field_embedders, for further usage.
The following 13 code examples of text_field_embedders.TextFieldEmbedder are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
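Before the examples, here is a minimal sketch of how a TextFieldEmbedder instance is typically built and handed to the constructors below. The "tokens" namespace and the 0.9-era AllenNLP API are assumptions, not taken from the examples on this page:
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding

vocab = Vocabulary()  # normally built from your dataset readers
# map each TextField token namespace to a TokenEmbedder
embedder = BasicTextFieldEmbedder({
    "tokens": Embedding(num_embeddings=vocab.get_vocab_size("tokens"),
                        embedding_dim=300)
})
# any of the models below can now receive this as embedder / word_embeddings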
Example 1: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
embedder: TextFieldEmbedder,
vocab: Vocabulary,
             lm_head: LanguageModelHead = None,
             clf_head: ClassificationHead = None,
             language_model_weight: float = 0.5) -> None:
    super().__init__(vocab)
    # at least one of the two heads must be provided
    assert not (lm_head is None and clf_head is None)
self.embedder = embedder
self.clf_head = clf_head
self.lm_head = lm_head
self.language_model_weight = language_model_weight
self.vocab = vocab
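The forward() is not shown on this page; the following is a purely illustrative sketch of how the two heads and language_model_weight could combine. The heads' call signatures are assumptions:
# hypothetical forward (not from the original model): each head is assumed
# to return a loss, and the LM loss is scaled by language_model_weight
def forward(self, tokens, labels=None):
    hidden = self.embedder(tokens)                       # (batch, seq, dim)
    loss = 0.0
    if self.lm_head is not None:
        loss = loss + self.language_model_weight * self.lm_head(hidden, tokens)
    if self.clf_head is not None and labels is not None:
        loss = loss + self.clf_head(hidden, labels)
    return {'loss': loss}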
Example 2: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
n_kernels: int):
super(KNRM, self).__init__()
self.word_embeddings = word_embeddings
    # static kernel means & widths (not learned); created directly on the GPU
    # via torch.cuda.FloatTensor, so this variant requires CUDA
    # (torch.autograd.Variable is deprecated; plain tensors work in current PyTorch)
    self.mu = Variable(torch.cuda.FloatTensor(self.kernel_mus(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
    self.sigma = Variable(torch.cuda.FloatTensor(self.kernel_sigmas(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
# this does not really do "attention" - just a plain cosine matrix calculation (without learnable weights)
self.cosine_module = CosineMatrixAttention()
    # bias is set to True in the original code (we found it does not help)
    self.dense = nn.Linear(n_kernels, 1, bias=False)
    # init with small weights, otherwise the dense output is far too large for
    # the tanh, resulting in a constant loss of 1 (inits taken from matchzoo)
    torch.nn.init.uniform_(self.dense.weight, -0.014, 0.014)
    # self.dense.bias.data.fill_(0.0)
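kernel_mus and kernel_sigmas are referenced above but not shown on this page; the following sketch mirrors the common K-NRM reference implementation and is an assumption about this codebase:
def kernel_mus(self, n_kernels: int):
    # n_kernels equally spaced kernel means over the cosine range [-1, 1];
    # the first kernel (mu = 1.0) captures exact matches
    l_mu = [1.0]
    if n_kernels == 1:
        return l_mu
    bin_size = 2.0 / (n_kernels - 1)
    l_mu.append(1 - bin_size / 2)  # middle of the first soft bin
    for i in range(1, n_kernels - 1):
        l_mu.append(l_mu[i] - bin_size)
    return l_mu

def kernel_sigmas(self, n_kernels: int):
    # a tiny sigma for the exact-match kernel, half a bin width for the rest
    l_sigma = [0.0001]
    if n_kernels == 1:
        return l_sigma
    bin_size = 2.0 / (n_kernels - 1)
    l_sigma += [0.5 * bin_size] * (n_kernels - 1)
    return l_sigma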
Example 3: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
vocab: Vocabulary,
lstm_hidden_dim: int,
top_k: int,
cuda_device: int) -> None:
super().__init__(vocab)
self.word_embeddings = word_embeddings
    self.query_rep = nn.LSTM(self.word_embeddings.get_output_dim(), lstm_hidden_dim, batch_first=True, bidirectional=True)
    self.doc_rep = nn.LSTM(self.word_embeddings.get_output_dim(), lstm_hidden_dim, batch_first=True, bidirectional=True)
# this does not really do "attention" - just a plain cosine matrix calculation (without learnable weights)
self.cosine_module = CosineMatrixAttention()
self.top_k = top_k
self.dense = nn.Linear(top_k, out_features=20, bias=True)
self.dense2 = nn.Linear(20, out_features=20, bias=True)
self.dense3 = nn.Linear(20, out_features=1, bias=False)
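forward() is not shown here; below is one plausible wiring of these components. Masking is omitted for brevity and the exact pooling is an assumption:
# hypothetical forward: encode both sequences, compute the cosine match matrix,
# keep the top_k signals per query term, and score them with the small MLP
def forward(self, query, document):
    q_enc, _ = self.query_rep(self.word_embeddings(query))       # (batch, q_len, 2*hidden)
    d_enc, _ = self.doc_rep(self.word_embeddings(document))      # (batch, d_len, 2*hidden)
    cosine_matrix = self.cosine_module(q_enc, d_enc)             # (batch, q_len, d_len)
    top_k, _ = cosine_matrix.topk(self.top_k, dim=-1)            # (batch, q_len, top_k)
    per_term = torch.tanh(self.dense2(torch.tanh(self.dense(top_k))))
    return self.dense3(per_term).squeeze(-1).sum(dim=1)          # (batch,)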
Example 4: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
vocab: Vocabulary,
word_embedder: TextFieldEmbedder,
character_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
character_encoder: Seq2VecEncoder) -> None:
super().__init__(vocab)
self._word_embedder = word_embedder
self._character_embedder = character_embedder
self._character_encoder = character_encoder
self._encoder = encoder
self._classifier = torch.nn.Linear(
in_features=encoder.get_output_dim(),
out_features=vocab.get_vocab_size('labels')
)
self._f1 = SpanBasedF1Measure(vocab, 'labels')
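The forward pass is not shown on this page; a hedged sketch of how these pieces typically fit together follows. Field names, num_wrapping_dims, and the use of TimeDistributed are assumptions; util is allennlp.nn.util and TimeDistributed is allennlp.modules.TimeDistributed:
# hypothetical forward: pool characters into one vector per token, concatenate
# with the word embedding, encode the sequence, and classify each position
# (the character mask is omitted for brevity)
def forward(self, tokens, characters, labels=None):
    mask = util.get_text_field_mask(tokens)                               # (batch, seq)
    word_emb = self._word_embedder(tokens)                                # (batch, seq, word_dim)
    char_emb = self._character_embedder(characters, num_wrapping_dims=1)  # (batch, seq, n_chars, char_dim)
    char_rep = TimeDistributed(self._character_encoder)(char_emb)         # (batch, seq, char_hidden)
    encoded = self._encoder(torch.cat([word_emb, char_rep], dim=-1), mask)
    logits = self._classifier(encoded)                                    # (batch, seq, n_labels)
    output = {'logits': logits}
    if labels is not None:
        output['loss'] = util.sequence_cross_entropy_with_logits(logits, labels, mask)
        self._f1(logits, labels, mask)
    return output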
Example 5: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder) -> None:
super().__init__(vocab)
self._embedder = embedder
self._encoder = encoder
self._classifier = torch.nn.Linear(
in_features=encoder.get_output_dim(),
out_features=vocab.get_vocab_size('labels')
)
self._crf = ConditionalRandomField(
vocab.get_vocab_size('labels')
)
self._f1 = SpanBasedF1Measure(vocab, 'labels')
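forward() is omitted on this page; below is a hedged sketch of the usual AllenNLP CRF pattern. Field names are assumptions and util is allennlp.nn.util:
# hypothetical forward: train on the negative CRF log-likelihood, decode with Viterbi
def forward(self, tokens, labels=None):
    mask = util.get_text_field_mask(tokens)
    logits = self._classifier(self._encoder(self._embedder(tokens), mask))
    best_paths = self._crf.viterbi_tags(logits, mask)
    output = {'tags': [tags for tags, score in best_paths]}
    if labels is not None:
        output['loss'] = -self._crf(logits, labels, mask)
        self._f1(logits, labels, mask)
    return output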
Example 6: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
conv_output_size: List[int],
conv_kernel_size: List[Tuple[int,int]],
adaptive_pooling_size: List[Tuple[int,int]]):
super(MatchPyramid, self).__init__()
self.word_embeddings = word_embeddings
    if len(conv_output_size) != len(conv_kernel_size) or len(conv_output_size) != len(adaptive_pooling_size):
        raise ValueError("conv_output_size, conv_kernel_size, and adaptive_pooling_size must have the same length")
# todo
Example 7: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
n_kernels: int):
super(KNRM, self).__init__()
self.word_embeddings = word_embeddings
    # static kernel means & widths (not learned); plain CPU tensors in this variant
self.mu = Variable(torch.FloatTensor(self.kernel_mus(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
self.sigma = Variable(torch.FloatTensor(self.kernel_sigmas(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
#todo
Example 8: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
             n_grams: int,
             n_kernels: int,
             conv_out_dim: int):
super(Conv_KNRM, self).__init__()
self.word_embeddings = word_embeddings
    # static kernel means & widths (not learned)
self.mu = Variable(torch.FloatTensor(self.kernel_mus(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
self.sigma = Variable(torch.FloatTensor(self.kernel_sigmas(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
# todo
Example 9: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
conv_output_size: List[int],
conv_kernel_size: List[Tuple[int,int]],
adaptive_pooling_size: List[Tuple[int,int]]):
super(MatchPyramid, self).__init__()
self.word_embeddings = word_embeddings
self.cosine_module = CosineMatrixAttention()
    # alternative: self.cosine_module = DotProductMatrixAttention()
    if len(conv_output_size) != len(conv_kernel_size) or len(conv_output_size) != len(adaptive_pooling_size):
        raise ValueError("conv_output_size, conv_kernel_size, and adaptive_pooling_size must have the same length")
conv_layer_dict = OrderedDict()
last_channel_out = 1
for i in range(len(conv_output_size)):
conv_layer_dict["pad " +str(i)] = nn.ConstantPad2d((0,conv_kernel_size[i][0] - 1,0, conv_kernel_size[i][1] - 1), 0)
conv_layer_dict["conv "+str(i)] = nn.Conv2d(kernel_size=conv_kernel_size[i], in_channels=last_channel_out, out_channels=conv_output_size[i])
conv_layer_dict["relu "+str(i)] = nn.ReLU()
conv_layer_dict["pool "+str(i)] = nn.AdaptiveMaxPool2d(adaptive_pooling_size[i]) # this is strange - but so written in the paper
# would think only to pool at the end ??
last_channel_out = conv_output_size[i]
self.conv_layers = nn.Sequential(conv_layer_dict)
#self.dropout = nn.Dropout(0)
self.dense = nn.Linear(conv_output_size[-1] * adaptive_pooling_size[-1][0] * adaptive_pooling_size[-1][1], out_features=100, bias=True)
self.dense2 = nn.Linear(100, out_features=10, bias=True)
self.dense3 = nn.Linear(10, out_features=1, bias=False)
    # init with small weights, otherwise the dense output is far too large for
    # the tanh, resulting in a constant loss of 1 (inits taken from matchzoo)
    # torch.nn.init.uniform_(self.dense.weight, -0.014, 0.014)
    # self.dense.bias.data.fill_(0.0)
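A hedged sketch of the matching forward pass implied by this constructor (the real forward() is not shown; shapes and the tanh activations between the dense layers are assumptions):
# hypothetical forward: the cosine match matrix is treated as a one-channel
# image, convolved and pooled, then flattened into the scoring MLP
def forward(self, query, document):
    query_emb = self.word_embeddings(query)                             # (batch, q_len, emb)
    doc_emb = self.word_embeddings(document)                            # (batch, d_len, emb)
    match_matrix = self.cosine_module(query_emb, doc_emb).unsqueeze(1)  # (batch, 1, q_len, d_len)
    conv_out = self.conv_layers(match_matrix).flatten(start_dim=1)
    hidden = torch.tanh(self.dense2(torch.tanh(self.dense(conv_out))))
    return self.dense3(hidden).squeeze(-1)                              # (batch,)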
Example 10: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
             n_grams: int,
             n_kernels: int,
             conv_out_dim: int):
super(Conv_KNRM, self).__init__()
self.word_embeddings = word_embeddings
    # static kernel means & widths (not learned); created directly on the GPU
    # via torch.cuda.FloatTensor, so this variant requires CUDA
    self.mu = Variable(torch.cuda.FloatTensor(self.kernel_mus(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
    self.sigma = Variable(torch.cuda.FloatTensor(self.kernel_sigmas(n_kernels)), requires_grad=False).view(1, 1, 1, n_kernels)
self.convolutions = []
for i in range(1, n_grams + 1):
self.convolutions.append(
nn.Sequential(
nn.ConstantPad1d((0,i - 1), 0),
nn.Conv1d(kernel_size=i, in_channels=word_embeddings.get_output_dim(), out_channels=conv_out_dim),
nn.ReLU())
)
self.convolutions = nn.ModuleList(self.convolutions) # register conv as part of the model
# this does not really do "attention" - just a plain cosine matrix calculation (without learnable weights)
self.cosine_module = CosineMatrixAttention()
    # n_kernels * n_grams * n_grams: all n-gram cross-match kernel sums are
    # concatenated before the dense layer (e.g. 3 x 3 = 9 groups for n_grams = 3)
    self.dense = nn.Linear(n_kernels * n_grams * n_grams, 1, bias=False)
    # init with small weights, otherwise the dense output is far too large
    # for the tanh (inits taken from matchzoo)
    torch.nn.init.uniform_(self.dense.weight, -0.014, 0.014)
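A hedged sketch of how the convolutions defined above are typically applied; the helper name is hypothetical and the real forward() is not part of this example:
# each Conv1d produces one n-gram representation of the sequence; Conv1d expects
# (batch, channels, seq), so the embedding output is transposed before and after
def _ngram_representations(self, emb):                 # emb: (batch, seq, emb_dim)
    t = emb.transpose(1, 2)
    return [conv(t).transpose(1, 2) for conv in self.convolutions]
# the forward pass then cross-matches every query n-gram representation against
# every document n-gram representation (n_grams * n_grams cosine matrices),
# kernel-pools each, and concatenates the results for self.dense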
Example 11: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
neural_ir_model: nn.Module):
super(NeuralIR_Encoder, self).__init__()
self.word_embeddings = word_embeddings
self.neural_ir_model = neural_ir_model
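forward() is not included in the snippet; given the two fields above, it plausibly looks like this (an assumption):
# hypothetical forward: embed the raw token ids once, then delegate scoring
# to the wrapped neural IR model
def forward(self, query, document):
    return self.neural_ir_model(self.word_embeddings(query),
                                self.word_embeddings(document))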
Example 12: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
word_embeddings: TextFieldEmbedder,
             bin_count: int):
    super(DRMM, self).__init__()
self.word_embeddings = word_embeddings
self.cosine_module = CosineMatrixAttention()
self.bin_count = bin_count
    self.matching_classifier = FeedForward(input_dim=bin_count, num_layers=2,
                                           hidden_dims=[bin_count, 1],
                                           activations=[Activation.by_name('tanh')(), Activation.by_name('tanh')()])
    self.query_gate = FeedForward(input_dim=self.word_embeddings.get_output_dim(), num_layers=2,
                                  hidden_dims=[self.word_embeddings.get_output_dim(), 1],
                                  activations=[Activation.by_name('tanh')(), Activation.by_name('tanh')()])
self.query_softmax = MaskedSoftmax()
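A hedged sketch of how the query gate defined above is typically applied in DRMM; query_embeddings and query_mask are placeholder names, and the MaskedSoftmax call signature is an assumption:
# per-term importance computed from the term embedding, normalized over real tokens
gate_logits = self.query_gate(query_embeddings).squeeze(-1)   # (batch, q_len)
term_weights = self.query_softmax(gate_logits, query_mask)    # (batch, q_len)
# the final DRMM score is the weighted sum of the per-term matching scores
# produced by self.matching_classifier over the bin_count histogram features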
Example 13: __init__
# Required import: from allennlp.modules import text_field_embedders [as alias]
# Or: from allennlp.modules.text_field_embedders import TextFieldEmbedder [as alias]
def __init__(self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder) -> None:
super().__init__(vocab)
self._embedder = embedder
self._encoder = encoder
self._classifier = torch.nn.Linear(in_features=encoder.get_output_dim(),
out_features=vocab.get_vocab_size('labels'))
    # note: label_encoding must be passed by keyword; the third positional
    # argument of SpanBasedF1Measure is ignore_classes, not the encoding
    self._f1 = SpanBasedF1Measure(vocab, 'labels', label_encoding='IOB1')