本文整理匯總了Python中allennlp.nn.Activation.by_name方法的典型用法代碼示例。如果您正苦於以下問題:Python Activation.by_name方法的具體用法?Python Activation.by_name怎麽用?Python Activation.by_name使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類allennlp.nn.Activation
的用法示例。
在下文中一共展示了Activation.by_name方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(
    self,
    matrix_1_dim: int,
    matrix_2_dim: int,
    activation: Activation = None,
    use_input_biases: bool = False,
    label_dim: int = 1,
) -> None:
    """Bilinear scorer between two matrices.

    Allocates a weight of shape ``(matrix_1_dim, matrix_2_dim)`` — or
    ``(label_dim, matrix_1_dim, matrix_2_dim)`` when scoring multiple labels —
    plus a scalar bias. When ``use_input_biases`` is set, each input dim is
    grown by one to make room for an appended bias feature.
    """
    super().__init__()
    self._use_input_biases = use_input_biases
    if use_input_biases:
        matrix_1_dim += 1
        matrix_2_dim += 1
    # A single 2-D weight for one label; a stacked 3-D weight otherwise.
    if label_dim == 1:
        weight_shape = (matrix_1_dim, matrix_2_dim)
    else:
        weight_shape = (label_dim, matrix_1_dim, matrix_2_dim)
    self._weight_matrix = Parameter(torch.Tensor(*weight_shape))
    self._bias = Parameter(torch.Tensor(1))
    # Default to the identity ("linear") activation when none is supplied.
    self._activation = activation or Activation.by_name("linear")()
    self.reset_parameters()
示例2: test_feedforward_encoder_exactly_match_feedforward_each_item
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def test_feedforward_encoder_exactly_match_feedforward_each_item(self):
    """The encoder wrapper must reproduce the raw feedforward's per-item output,
    and zero out masked positions when a mask is given."""
    inner = FeedForward(
        input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
    )
    wrapper = FeedForwardEncoder(inner)
    inputs = torch.randn([2, 3, 10])

    # Without a mask: identical to applying the feedforward directly.
    numpy.testing.assert_array_almost_equal(
        inner(inputs).detach().cpu().numpy(),
        wrapper(inputs).detach().cpu().numpy(),
    )

    # With a mask: masked timesteps must come back as zeros.
    mask = torch.tensor([[True, True, True], [True, False, False]])
    expected = inner(inputs) * mask.unsqueeze(dim=-1).float()
    numpy.testing.assert_array_almost_equal(
        expected.detach().cpu().numpy(),
        wrapper(inputs, mask).detach().cpu().numpy(),
    )
示例3: from_params
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def from_params(cls, params):
    """Build the feedforward from a ``Params`` blob, consuming every key.

    ``activations`` may be a single activation name or a list of names
    (one per layer); either way the names are resolved to instances here.
    """
    input_dim = params.pop_int(u'input_dim')
    num_layers = params.pop_int(u'num_layers')
    hidden_dims = params.pop(u'hidden_dims')
    activation_spec = params.pop(u'activations')
    dropout = params.pop(u'dropout', 0.0)
    if isinstance(activation_spec, list):
        activations = [Activation.by_name(name)() for name in activation_spec]
    else:
        activations = Activation.by_name(activation_spec)()
    # Fail loudly on any unconsumed (i.e. misspelled/unknown) keys.
    params.assert_empty(cls.__name__)
    return cls(
        input_dim=input_dim,
        num_layers=num_layers,
        hidden_dims=hidden_dims,
        activations=activations,
        dropout=dropout,
    )
示例4: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(
    self,
    encoder_output_dim: int,
    action_embedding_dim: int,
    input_attention: Attention,
    activation: Activation = None,
    add_action_bias: bool = True,
    dropout: float = 0.0,
) -> None:
    """Transition-function state with a learned checklist balance parameter.

    Parameters are forwarded to the parent transition function; this subclass
    only adds ``_checklist_multiplier``, a scalar weight on the checklist
    balance (see the class docstring).
    """
    super().__init__(
        encoder_output_dim=encoder_output_dim,
        action_embedding_dim=action_embedding_dim,
        input_attention=input_attention,
        # Resolve the default here rather than in the signature: a
        # ``Activation.by_name("relu")()`` default would be built once at
        # import time and shared by every instance (mutable-default
        # anti-pattern), and the other constructors in this file all use
        # the ``None``-then-resolve pattern.
        activation=activation or Activation.by_name("relu")(),
        add_action_bias=add_action_bias,
        dropout=dropout,
    )
    # See the class docstring for a description of what this does.
    self._checklist_multiplier = Parameter(torch.FloatTensor([1.0]))
示例5: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(self,
             tensor_1_dim: int,
             tensor_2_dim: int,
             combination: str = 'x,y',
             activation: Activation = None) -> None:
    """Linear similarity over a combined representation of two tensors.

    The weight vector's size is derived from ``combination`` (which
    combination ops — ``x``, ``y``, ``x*y``, ... — are requested).
    """
    super(LinearExtenedSimilarity, self).__init__()
    self._combination = combination
    combined_dim = get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
    self._weight_vector = Parameter(torch.Tensor(combined_dim))
    self._bias = Parameter(torch.Tensor(1))
    # Resolve the default activation here rather than in the signature: a
    # signature-time ``Activation.by_name('linear')()`` would create a single
    # instance at import time shared by every object (mutable-default
    # anti-pattern); ``None``-then-resolve matches the file's other ctors.
    self._activation = activation or Activation.by_name('linear')()
    self.reset_parameters()
示例6: from_params
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def from_params(cls, params: Params) -> 'LinearExtenedSimilarity':
    """Construct from a ``Params`` object, verifying no keys are left over."""
    kwargs = {
        "tensor_1_dim": params.pop_int("tensor_1_dim"),
        "tensor_2_dim": params.pop_int("tensor_2_dim"),
        "combination": params.pop("combination", "x,y"),
        "activation": Activation.by_name(params.pop("activation", "linear"))(),
    }
    # Any key still present at this point is unknown — raise.
    params.assert_empty(cls.__name__)
    return cls(**kwargs)
示例7: from_params
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def from_params(cls, params: Params) -> 'LinearTransformSumReprCombination':
    """Read the (optional, default 0) dims and activation name, then build.

    NOTE(review): ``params.get`` does not consume keys, so unlike the
    ``pop``-based factories in this file there is no ``assert_empty`` check
    here — presumably intentional; confirm against ``Params`` usage.
    """
    dim_1 = params.get("tensor_1_dim", 0)
    dim_2 = params.get("tensor_2_dim", 0)
    out_dim = params.get("output_dim", 0)
    act = Activation.by_name(params.get("activation", "linear"))()
    return cls(dim_1, dim_2, out_dim, act)
示例8: from_params
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def from_params(cls, params: Params) -> 'WeightedSumReprCombination':
    """Read dims, threshold, and activation name from ``params`` and build.

    NOTE(review): uses non-consuming ``params.get`` (no ``assert_empty``),
    and the keys here are ``tensor1_dim``/``tensor2_dim`` (no underscore
    before the digit) unlike the other factories — verify against configs.
    """
    threshold = params.get("keep_context_threshold", 0.5)
    dim_1 = params.get("tensor1_dim", 0)
    dim_2 = params.get("tensor2_dim", 0)
    out_dim = params.get("output_dim", 0)
    act = Activation.by_name(params.get("activation", "linear"))()
    return cls(dim_1, dim_2, out_dim, threshold, act)
示例9: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(
    self,
    vector_dim: int,
    matrix_dim: int,
    activation: Activation = None,
    normalize: bool = True,
) -> None:
    """Bilinear attention: a ``(vector_dim, matrix_dim)`` weight plus a
    scalar bias, with an optional activation (identity by default)."""
    super().__init__(normalize)
    # Fall back to the identity ("linear") activation when none is given.
    self._activation = activation or Activation.by_name("linear")()
    self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim))
    self._bias = Parameter(torch.Tensor(1))
    self.reset_parameters()
示例10: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(
    self,
    embedding_dim: int,
    num_filters: int,
    ngram_filter_sizes: Tuple[int, ...] = (2, 3, 4, 5),
    conv_layer_activation: Activation = None,
    output_dim: Optional[int] = None,
) -> None:
    """CNN encoder: one ``Conv1d`` per n-gram size over the embedding
    dimension, max-pooled and optionally projected to ``output_dim``."""
    super().__init__()
    self._embedding_dim = embedding_dim
    self._num_filters = num_filters
    self._ngram_filter_sizes = ngram_filter_sizes
    self._activation = conv_layer_activation or Activation.by_name("relu")()
    self._output_dim = output_dim

    self._convolution_layers = []
    for index, ngram_size in enumerate(self._ngram_filter_sizes):
        layer = Conv1d(
            in_channels=self._embedding_dim,
            out_channels=self._num_filters,
            kernel_size=ngram_size,
        )
        # The layers live in a plain list, so each must be registered
        # explicitly for its parameters to be visible to the module.
        self.add_module("conv_layer_%d" % index, layer)
        self._convolution_layers.append(layer)

    # One max-pooled feature vector of num_filters per filter size.
    maxpool_output_dim = self._num_filters * len(self._ngram_filter_sizes)
    if self._output_dim:
        self.projection_layer = Linear(maxpool_output_dim, self._output_dim)
    else:
        self.projection_layer = None
        self._output_dim = maxpool_output_dim
示例11: test_get_dimension_is_correct
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def test_get_dimension_is_correct(self):
    """The encoder must report the wrapped feedforward's input/output dims."""
    inner = FeedForward(
        input_dim=10, num_layers=1, hidden_dims=10, activations=Activation.by_name("linear")()
    )
    wrapper = FeedForwardEncoder(inner)
    assert wrapper.get_input_dim() == inner.get_input_dim()
    assert wrapper.get_output_dim() == inner.get_output_dim()
示例12: test_init_checks_activation_consistency
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def test_init_checks_activation_consistency(self):
    """Two activations for a five-layer network must raise ConfigurationError."""
    two_relus = [Activation.by_name("relu")(), Activation.by_name("relu")()]
    with pytest.raises(ConfigurationError):
        FeedForward(2, 4, 5, two_relus)
示例13: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(self,
             tensor_1_dim,
             tensor_2_dim,
             combination=u'x,y',
             activation=None):
    """Linear similarity over a combined view of the two input tensors.

    The weight vector's size depends on which combination operations
    (``x``, ``y``, ``x*y``, ...) the ``combination`` string requests.
    """
    super(LinearSimilarity, self).__init__()
    self._combination = combination
    combined = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
    self._weight_vector = Parameter(torch.Tensor(combined))
    self._bias = Parameter(torch.Tensor(1))
    # Identity ("linear") activation unless one was supplied.
    self._activation = activation or Activation.by_name(u'linear')()
    self.reset_parameters()
示例14: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(self,
             tensor_1_dim,
             tensor_2_dim,
             activation=None):
    """Bilinear similarity: ``(tensor_1_dim, tensor_2_dim)`` weight matrix
    plus a scalar bias, with an optional activation (identity by default)."""
    super(BilinearSimilarity, self).__init__()
    # Identity ("linear") activation unless one was supplied.
    self._activation = activation or Activation.by_name(u'linear')()
    self._weight_matrix = Parameter(torch.Tensor(tensor_1_dim, tensor_2_dim))
    self._bias = Parameter(torch.Tensor(1))
    self.reset_parameters()
示例15: __init__
# 需要導入模塊: from allennlp.nn import Activation [as 別名]
# 或者: from allennlp.nn.Activation import by_name [as 別名]
def __init__(self,
             vector_dim,
             matrix_dim,
             activation=None,
             normalize=True):
    """Bilinear attention parameters: a ``(vector_dim, matrix_dim)`` weight
    and scalar bias; ``normalize`` is forwarded to the attention base class."""
    super(BilinearAttention, self).__init__(normalize)
    # Identity ("linear") activation unless one was supplied.
    self._activation = activation or Activation.by_name(u'linear')()
    self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim))
    self._bias = Parameter(torch.Tensor(1))
    self.reset_parameters()