This article collects typical usage examples of Python's torch.nn.functional.adaptive_avg_pool1d. If you have been wondering what functional.adaptive_avg_pool1d does and how it is used in practice, the curated code examples below should help. You can also read further about the module the method lives in, torch.nn.functional.
The following shows 9 code examples of functional.adaptive_avg_pool1d, sorted by popularity by default.
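As a quick reference before the examples (a minimal sketch, not taken from any of them): F.adaptive_avg_pool1d expects an input of shape [N, C, L] and averages over the last dimension to produce the requested output_size.

import torch
import torch.nn.functional as F

# Illustrative shapes only: input is [N, C, L], output is [N, C, output_size];
# the pooling windows are chosen automatically from the input and output lengths.
x = torch.randn(2, 8, 32)
y = F.adaptive_avg_pool1d(x, 4)   # -> torch.Size([2, 8, 4])
z = F.adaptive_avg_pool1d(x, 1)   # global average over the length axis -> torch.Size([2, 8, 1])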
Example 1: fuse_skip
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def fuse_skip(self, input_, skip):
    #print('input_ shape: ', input_.shape)
    #print('skip shape: ', skip.shape)
    dfactor = skip.shape[2] // input_.shape[2]
    if dfactor > 1:
        #print('dfactor: ', dfactor)
        # downsample skips
        # [B, F, T]
        maxlen = input_.shape[2] * dfactor
        skip = skip[:, :, :maxlen]
        bsz, feats, slen = skip.shape
        skip_re = skip.view(bsz, feats, slen // dfactor, dfactor)
        skip = torch.mean(skip_re, dim=3)
        #skip = F.adaptive_avg_pool1d(skip, input_.shape[2])
    if self.densemerge == 'concat':
        return torch.cat((input_, skip), dim=1)
    elif self.densemerge == 'sum':
        return input_ + skip
    else:
        raise TypeError('Unknown densemerge: ', self.densemerge)
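The commented-out line hints that the manual view + mean downsampling is interchangeable with adaptive pooling when the skip length is an exact multiple of the target length. A small check sketch (the tensor shapes here are illustrative, not from the original model):

import torch
import torch.nn.functional as F

skip = torch.randn(4, 16, 120)                    # [B, F, T]
target_len = 30                                   # T is an exact multiple of target_len
dfactor = skip.shape[2] // target_len
manual = skip.view(4, 16, target_len, dfactor).mean(dim=3)
pooled = F.adaptive_avg_pool1d(skip, target_len)
print(torch.allclose(manual, pooled))             # True when T % target_len == 0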
Example 2: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def forward(self, graph):
    nodes = graph.node_tensor
    out = self.preprocess(nodes)
    out = out.reshape(out.size(0), out.size(1) * out.size(2), 1)
    out += self.merge(nodes).reshape(out.size(0), out.size(1) * out.size(2), 1)
    out = self.activation(out)
    for _ in range(self.depth - 1):
        out -= graph.laplacian_action(out)
        out = self.propagate(out)
        out += self.merge(nodes).reshape(out.size(0), out.size(1) * out.size(2), 1)
        out = self.activation(out)
    out = out.reshape(nodes.size(0), nodes.size(1), self.width)
    out = func.adaptive_avg_pool1d(out, 1).reshape(
        nodes.size(0), -1
    ).unsqueeze(2)
    result = graph.new_like()
    result.node_tensor = out
    return result
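Note that adaptive_avg_pool1d always pools over the last axis, so with out reshaped to [batch, nodes, width] the call above averages over the width per node, not over the nodes. A shape-only sketch (sizes are assumptions):

import torch
import torch.nn.functional as F

out = torch.randn(2, 5, 7)              # [batch, nodes, width], illustrative sizes
pooled = F.adaptive_avg_pool1d(out, 1)  # averages over width -> torch.Size([2, 5, 1])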
Example 3: fuse
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def fuse(self, out):
    last_feature = out[-1]
    for i in range(len(out) - 1):
        out[i] = F.adaptive_avg_pool1d(out[i], last_feature.shape[-1])
    return out
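Here the pooling aligns a list of multi-scale feature maps to the temporal length of the last one so they can be combined. A minimal sketch with made-up sizes:

import torch
import torch.nn.functional as F

out = [torch.randn(2, 32, 64), torch.randn(2, 32, 32), torch.randn(2, 32, 16)]
target = out[-1].shape[-1]
aligned = [F.adaptive_avg_pool1d(t, target) for t in out]
# every tensor in aligned is now [2, 32, 16]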
Example 4: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def forward(self, data, structure):
    out = self.preprocess(data, data, structure)
    for block in self.blocks:
        out = block(out, data, structure)
    out = self.postprocess(out, data, structure)
    out = out.reshape(data.size(0), -1, self.width)
    return func.adaptive_avg_pool1d(out, 1)
Example 5: pool
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def pool(self, input):
    return F.adaptive_avg_pool1d(input, 1)
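With output_size=1 this is a global average over the length axis, equivalent to a mean with keepdim. A quick equivalence check:

import torch
import torch.nn.functional as F

x = torch.randn(3, 10, 25)
print(torch.allclose(F.adaptive_avg_pool1d(x, 1), x.mean(dim=-1, keepdim=True)))  # True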
Example 6: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def forward(self, inputs, state):
    x = self.embedder(inputs)
    x = x.transpose(1, 2)
    state = F.adaptive_avg_pool1d(state, x.size(2))
    x = torch.cat([x, state], 1)
    x = self.convs(x)
    x = x.transpose(1, 2)  # BxTxN
    x = x.contiguous().view(-1, x.size(2))
    x = self.classifier(x)
    x = x.view(inputs.size(0), inputs.size(1), -1)  # BxTxN
    return x
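The pooling here resamples state along its time axis so it matches the embedded input's length before the channel-wise concatenation. A shape-only sketch (all sizes are assumptions):

import torch
import torch.nn.functional as F

x = torch.randn(4, 128, 50)                      # embedded input, [B, C, T]
state = torch.randn(4, 64, 170)                  # context with a different length
state = F.adaptive_avg_pool1d(state, x.size(2))  # resampled to [4, 64, 50]
fused = torch.cat([x, state], dim=1)             # -> [4, 192, 50]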
Example 7: test_adaptive_avg_pool1d
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def test_adaptive_avg_pool1d(self):
    inp = torch.randn(1, 1, 28, device='cuda', dtype=self.dtype)
    out = F.adaptive_avg_pool1d(inp, output_size=5)
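The test above assumes a CUDA device and a dtype supplied by its test class; the same call can be checked on CPU, where the output shape is [1, 1, 5]:

import torch
import torch.nn.functional as F

inp = torch.randn(1, 1, 28)
out = F.adaptive_avg_pool1d(inp, output_size=5)
assert out.shape == (1, 1, 5)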
Example 8: pool
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def pool(self, x, bs, is_max):
    f = F.adaptive_max_pool1d if is_max else F.adaptive_avg_pool1d
    return f(x.permute(1, 2, 0), (1,)).view(bs, -1)
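The permute suggests x is a sequence-first tensor such as an RNN output [T, B, C]; moving it to [B, C, T] lets the pooling run over time, and the view flattens the singleton length. A usage sketch under that assumption:

import torch
import torch.nn.functional as F

x = torch.randn(35, 8, 400)   # assumed [T, B, C]
bs = x.size(1)
avg = F.adaptive_avg_pool1d(x.permute(1, 2, 0), (1,)).view(bs, -1)  # [8, 400]
mx = F.adaptive_max_pool1d(x.permute(1, 2, 0), (1,)).view(bs, -1)   # [8, 400]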
Example 9: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_avg_pool1d [as an alias]
def forward(self, e_x, e_sig, x, x_sig):
    e_x = e_x.long()
    x = x.float()
    x_sig = x_sig.float()
    e_sig = e_sig.float()
    choices = x.size(1)
    e_x = self._pos_embeddings(e_x)
    e_x = e_x.transpose(1, 2)
    e_x = F.adaptive_avg_pool1d(e_x, 1).view(*e_x.size()[:2])
    e_x = e_x.unsqueeze(1)
    e_x = e_x.expand(e_x.size(0), choices, e_x.size(2)).contiguous()
    e_sig = e_sig.unsqueeze(1)
    e_sig = e_sig.expand(e_sig.size(0), choices, e_sig.size(2)).contiguous()
    x = torch.cat((
        x,
        x_sig,
        e_x,
        e_sig), dim=-1)
    x = x.view(-1, x.size(-1))
    i = self.individual_weights(x)
    i = F.relu(i)
    i = self.hidden_weights(i)
    i = F.relu(i)
    i = i.view(-1, choices, i.size(-1))
    s = i.transpose(1, 2)
    s = F.adaptive_max_pool1d(s, 1)
    s = s.transpose(1, 2)
    v = s.expand_as(i)
    v = torch.cat((i, v), dim=-1)
    v = v.view(-1, v.size(-1))
    v = self._dropout(v)
    x = self.score_weights(v)
    x = x.view(-1, choices)
    # x = F.relu(x)
    z = s.view(-1, s.size(-1))
    z = self._dropout(z)
    z = self.negative_weights(z)
    # x = torch.cat((z, x), dim=-1)
    return F.sigmoid(z.squeeze(dim=-1)), F.softmax(x, dim=-1)
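In this last example adaptive_avg_pool1d collapses the positional-embedding sequence into one fixed-size vector per example before it is broadcast across the choices. A reduced sketch of that single step (dimensions are illustrative):

import torch
import torch.nn.functional as F

e_x = torch.randn(4, 12, 64)                            # [B, T, D] embedding sequence
pooled = F.adaptive_avg_pool1d(e_x.transpose(1, 2), 1)  # [B, D, 1]
pooled = pooled.squeeze(-1)                             # [B, D], one vector per example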