本文整理汇总了Python中torch.nn.functional.layer_norm方法的典型用法代码示例。如果您正苦于以下问题:Python functional.layer_norm方法的具体用法?Python functional.layer_norm怎么用?Python functional.layer_norm使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.nn.functional的用法示例。
在下文中一共展示了functional.layer_norm方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, input):
    """Layer-normalize `input` in float32, then cast back to its dtype.

    Promoting the input (and the affine parameters, when present) to
    float32 before normalization avoids precision loss under mixed
    precision; the result is returned in the input's original dtype.
    """
    weight = None if self.weight is None else self.weight.float()
    bias = None if self.bias is None else self.bias.float()
    normalized = F.layer_norm(
        input.float(), self.normalized_shape, weight, bias, self.eps
    )
    return normalized.type_as(input)
示例2: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, x):
    """Layer-normalize over every dimension except the batch dimension.

    When the module is affine, its weight/bias are broadcast (via
    `expand`) to the per-sample normalized shape before being applied.
    """
    shape = x.size()[1:]
    if not self.affine:
        return F.layer_norm(x, shape)
    return F.layer_norm(
        x, shape, self.weight.expand(shape), self.bias.expand(shape)
    )
示例3: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, input, params=None):
    """Layer norm whose affine parameters can be supplied externally.

    If `params` is given (a mapping with optional 'weight'/'bias' keys)
    those tensors are used instead of the module's own parameters —
    useful for fast-weight / meta-learning updates. Otherwise the
    module's registered parameters are used.
    """
    source = OrderedDict(self.named_parameters()) if params is None else params
    return F.layer_norm(
        input,
        self.normalized_shape,
        source.get('weight', None),
        source.get('bias', None),
        self.eps,
    )
示例4: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, inp, attn_out=None):
    """Position-wise feed-forward sublayer with a residual connection.

    `inp` carries features on dim 1 (checked against `self.d_model`);
    layer norm is applied either before the FFN (`pre_lnorm`) or after
    the residual sum. `attn_out` is accepted for interface
    compatibility but unused in this body.
    """
    assert inp.size(1) == self.d_model, "Feature dimension not match!!"

    def _norm(t):
        # F.layer_norm normalizes the last dim, so swap features there and back.
        return F.layer_norm(t.transpose(1, 2), (self.d_model,)).transpose(1, 2)

    if self.pre_lnorm:
        inp = _norm(inp)
    hidden = self.drop1(F.relu(self.ff1_net(inp)))
    projected = self.drop2(self.ff2_net(hidden))
    # Residual uses the (possibly pre-normalized) input, as in the original.
    output = projected + inp
    if not self.pre_lnorm:
        output = _norm(output)
    return output
示例5: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, input):
    """Layer norm that dispatches to a fused CUDA kernel when available.

    CPU tensors take the reference `F.layer_norm` path; CUDA tensors
    are routed to the fused affine / non-affine autograd functions
    (defined elsewhere in this module).
    """
    if not input.is_cuda:
        return F.layer_norm(
            input, self.normalized_shape, self.weight, self.bias, self.eps
        )
    if self.elementwise_affine:
        return FusedLayerNormAffineFunction.apply(
            input, self.weight, self.bias, self.normalized_shape, self.eps
        )
    return FusedLayerNormFunction.apply(input, self.normalized_shape, self.eps)
示例6: test_layer_norm
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def test_layer_norm(self):
    """Smoke-test F.layer_norm on a CUDA tensor with no affine params.

    Normalizes over all non-batch dims; only checks that the call runs
    at `self.dtype` on the GPU.
    """
    sample = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
    _ = F.layer_norm(sample, sample.size()[1:], weight=None, bias=None, eps=1e-05)
示例7: forward
# 需要导入模块: from torch.nn import functional [as 别名]
# 或者: from torch.nn.functional import layer_norm [as 别名]
def forward(self, x):
    """Normalize over all non-batch dimensions of `x`.

    With `affine` enabled, the module's parameters are broadcast to the
    normalized shape; otherwise no affine transform is applied.
    """
    shape = x.shape[1:]
    weight = self.weight.expand(shape) if self.affine else None
    bias = self.bias.expand(shape) if self.affine else None
    return F.layer_norm(x, shape, weight, bias)