This article collects typical usage examples of the Python method torch.nn.functional.max_pool3d. If you are wondering what functional.max_pool3d does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage of the containing module, torch.nn.functional.
The following presents 15 code examples of functional.max_pool3d, sorted by popularity by default.
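Before diving into the collected examples, here is a minimal sketch of the basic call (not taken from any of the projects below; the tensor shapes are purely illustrative). max_pool3d expects a 5-D input of shape (N, C, D, H, W) and pools over the last three dimensions:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 32, 32)              # (N, C, D, H, W)
y = F.max_pool3d(x, kernel_size=2, stride=2)  # pools D, H and W
print(y.shape)                                # torch.Size([1, 3, 4, 16, 16])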
Example 1: apply
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def apply(features: Tensor, proposal_bboxes: Tensor, proposal_batch_indices: Tensor, mode: Mode) -> Tensor:
    _, _, feature_map_t, feature_map_height, feature_map_width = features.shape
    scale = 1 / 16
    output_size = (feature_map_t, 7, 7)
    if mode == Pooler.Mode.POOLING:
        pool = []
        for proposal_bbox, proposal_batch_index in zip(proposal_bboxes, proposal_batch_indices):
            # Map box coordinates into feature-map space and clamp to valid ranges.
            start_x = max(min(round(proposal_bbox[0].item() * scale), feature_map_width - 1), 0)   # [0, feature_map_width)
            start_y = max(min(round(proposal_bbox[1].item() * scale), feature_map_height - 1), 0)  # [0, feature_map_height)
            end_x = max(min(round(proposal_bbox[2].item() * scale) + 1, feature_map_width), 1)     # [1, feature_map_width]
            end_y = max(min(round(proposal_bbox[3].item() * scale) + 1, feature_map_height), 1)    # [1, feature_map_height]
            roi_feature_map = features[proposal_batch_index, :, :, start_y:end_y, start_x:end_x]
            pool.append(F.adaptive_max_pool3d(input=roi_feature_map, output_size=output_size))
        pool = torch.stack(pool, dim=0)
    else:
        raise ValueError
    # pool = F.max_pool3d(input=pool, kernel_size=(1, 2, 2), stride=(1, 2, 2))
    return pool
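For orientation, a hypothetical call to this method might look as follows; the Pooler entry point and the tensor shapes are assumptions for illustration, not part of the snippet:

features = torch.randn(2, 256, 4, 38, 50)   # (N, C, T, H, W) backbone features
proposal_bboxes = torch.tensor([[64., 32., 256., 192.],
                                [128., 64., 320., 240.]])  # (R, 4) in image coordinates
proposal_batch_indices = torch.tensor([0, 1])
# Assumed entry point; yields one (C, T, 7, 7) feature per proposal, stacked to (R, C, T, 7, 7).
pool = Pooler.apply(features, proposal_bboxes, proposal_batch_indices, mode=Pooler.Mode.POOLING)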
Example 2: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def forward(self, x):
    # print('line 114: x shape:', x.size())
    # x = F.max_pool3d(F.relu(self.bn1(self.conv1(x))), (2, 2, 2))  # conv -> relu -> pool
    x = F.max_pool3d(F.relu(self.conv1(x)), (2, 2, 2))  # conv -> relu -> pool
    x = F.max_pool3d(F.relu(self.conv2(x)), (2, 2, 2))  # conv -> relu -> pool
    x = F.max_pool3d(F.relu(self.conv3(x)), (2, 2, 2))  # conv -> relu -> pool
    # Flatten into a vector; view returns a tensor that shares the same data
    # but has a different shape (like reshape in MATLAB).
    x = x.view(-1, self.num_of_flat_features(x))
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = self.fc3(x)
    # x = F.sigmoid(x)
    # print('min, max, mean of x in the 0th layer:', x.min(), x.max(), x.mean())
    return x
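The helper num_of_flat_features is not shown in this snippet. A common implementation, following the standard PyTorch tutorial pattern (a hypothetical reconstruction, not the project's actual code), is:

def num_of_flat_features(self, x):
    size = x.size()[1:]   # all dimensions except the batch dimension
    num_features = 1
    for s in size:
        num_features *= s
    return num_features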
Example 3: _max_pool_impl
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def _max_pool_impl(input,              # type: torch.Tensor
                   size,               # type: List[int]
                   border='constant',  # type: str
                   padding=None,       # type: Optional[List[Tuple[int, int]]]
                   stride=None,        # type: Optional[List[int]]
                   dilation=None,      # type: Optional[List[int]]
                   with_index=False,   # type: bool
                   ):
    # type: (...)->torch.Tensor
    spatial_dims = len(input.shape) - 2
    # An 'ignore' border pads with -inf so padded positions never win the max.
    value = float('-inf') if border == 'ignore' else 0.0
    border = 'constant' if border == 'ignore' else border
    pad = nnef_pad(input=input, padding=padding, border=border, value=value)
    result = {1: F.max_pool1d, 2: F.max_pool2d, 3: F.max_pool3d}[spatial_dims](
        input=pad,
        kernel_size=size[2:],
        stride=stride[2:],
        padding=0,
        dilation=dilation[2:],
        return_indices=with_index)
    return result
Example 4: _compute_block_mask
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def _compute_block_mask(self, mask):
    # Dilate each sampled drop-center into a block_size**3 region via max pooling.
    block_mask = F.max_pool3d(input=mask[:, None, :, :, :],
                              kernel_size=(self.block_size, self.block_size, self.block_size),
                              stride=(1, 1, 1),
                              padding=self.block_size // 2)
    if self.block_size % 2 == 0:
        # An even kernel with padding block_size // 2 produces one extra element per dim.
        block_mask = block_mask[:, :, :-1, :-1, :-1]
    block_mask = 1 - block_mask.squeeze(1)  # invert: 1 = keep, 0 = drop
    return block_mask
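To see the effect, the following illustrative check (not from the source repo; DropBlock3D and its constructor are assumed names) shows how a single sampled drop-center is expanded into a block_size**3 region of zeros in the returned keep-mask:

mask = torch.zeros(1, 8, 8, 8)
mask[0, 4, 4, 4] = 1.0                  # one sampled drop center
block = DropBlock3D(block_size=3)       # hypothetical module exposing the method above
keep = block._compute_block_mask(mask)  # shape (1, 8, 8, 8)
print((keep == 0).sum().item())         # 27 == 3**3 zeroed positions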
Example 5: temporal_pool
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def temporal_pool(x, n_segment):
    nt, c, h, w = x.size()
    n_batch = nt // n_segment
    x = x.view(n_batch, n_segment, c, h, w).transpose(1, 2)  # n, c, t, h, w
    # Pool along the temporal dimension only, halving the number of frames.
    x = F.max_pool3d(x, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
    x = x.transpose(1, 2).contiguous().view(nt // 2, c, h, w)
    return x
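A quick shape check (illustrative values): with two clips of 8 frames each, the temporal dimension is halved while the spatial dimensions are untouched.

x = torch.randn(16, 64, 14, 14)   # nt = 16 (2 clips x 8 frames)
y = temporal_pool(x, n_segment=8)
print(y.shape)                    # torch.Size([8, 64, 14, 14])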
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def forward(self, x):
    return F.max_pool3d(x, kernel_size=self.pool_size, stride=self.pool_size)
Example 7: down
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def down(x, ks=(1, 2, 2)):
    # Before pooling, manually check that the tensor is divisible by the pooling
    # kernel size: PyTorch doesn't raise an error if it isn't, but computes the
    # output shape by floor division instead. While that may be fine for other
    # architectures, in a U-Net it leads to incorrect output shapes after upsampling.
    sh = x.shape[2:]
    if any(s % k != 0 for s, k in zip(sh, ks)):
        raise PoolingError(
            f'Can\'t pool {sh} input by a {ks} kernel. Please adjust the input shape.'
        )
    return F.max_pool3d(x, ks)
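Illustrative behaviour (PoolingError is defined elsewhere in that project; the shapes here are made up): pooling succeeds only when every spatial extent is divisible by its kernel entry.

x = torch.randn(1, 8, 7, 64, 64)
print(down(x).shape)                # torch.Size([1, 8, 7, 32, 32]); depth 7 passes since 7 % 1 == 0
down(torch.randn(1, 8, 7, 65, 64))  # raises PoolingError: 65 % 2 != 0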
Example 8: maxpool3d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def maxpool3d(
    module: Module, inputs, outputs, grad_input, grad_output, eps: float = 1e-10
):
    return maxpool(
        module,
        F.max_pool3d,
        F.max_unpool3d,
        inputs,
        outputs,
        grad_input,
        grad_output,
        eps=eps,
    )
Example 9: TrainForward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def TrainForward(self, x, y, GetGlobalFeat=False):
    # Downsample the target y by (2, 4, 4) with max pooling.
    y = F.max_pool3d(y, kernel_size=(2, 4, 4), stride=(2, 4, 4))
    LocOut, GlobalFeatPyramid = self.forward(x)
    if GetGlobalFeat:
        return LocOut, y, GlobalFeatPyramid
    else:
        return LocOut, y
Example 10: abstract_forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def abstract_forward(self, x):
    return x.abstractApplyLeaf("correlateMaxPool", kernel_size=self.kernel_size,
                               stride=self.kernel_size, max_type=self.max_type,
                               max_pool=F.max_pool3d)
Example 11: test_max_pool3d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def test_max_pool3d(self):
    inp = torch.randn(1, 16, 16, 32, 32, device='cuda', dtype=self.dtype)
    out = F.max_pool3d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
Example 12: test_max_unpool3d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def test_max_unpool3d(self):
    inp = torch.randn(1, 16, 8, 32, 32, device='cuda', dtype=self.dtype)
    output, indices = F.max_pool3d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
    output = F.max_unpool3d(output, indices, kernel_size=2, stride=2, padding=2)
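As a side note, with return_indices=True the pooling call returns an (output, indices) pair that max_unpool3d can invert geometrically; a minimal illustrative round trip (shapes only, made-up sizes):

inp = torch.randn(1, 4, 8, 8, 8)
out, idx = F.max_pool3d(inp, kernel_size=2, stride=2, return_indices=True)
rec = F.max_unpool3d(out, idx, kernel_size=2, stride=2)
print(rec.shape)                  # torch.Size([1, 4, 8, 8, 8]); non-max positions are filled with zeros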
Example 13: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def forward(self, x):
    if self.downsample:
        x = F.max_pool3d(x, 2)
    x = self.conv(x)  # increase the number of channels
    x = self.reversibleBlocks(x)
    return x
Example 14: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def forward(self, x):
    if self.maxpool:
        x = F.max_pool3d(x, 2)
    doInplace = INPLACE and not self.hasDropout
    x = F.leaky_relu(self.gn1(self.conv1(x)), inplace=doInplace)
    if self.hasDropout:
        x = self.dropout(x)
    if self.secondConv:
        x = F.leaky_relu(self.gn2(self.conv2(x)), inplace=INPLACE)
    return x
Example 15: base_test
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import max_pool3d [as alias]
def base_test():
    fc1 = nn.Linear(10, 20)
    fc1.weight.data.normal_(0.0, 1.0)
    fc1.bias.data.normal_(0.0, 1.0)
    fc2 = nn.Linear(20, 2)
    fc2.weight.data.normal_(0.0, 1.0)
    fc2.bias.data.normal_(0.0, 1.0)
    fc3 = nn.Linear(10, 2)
    fc3.weight.data.normal_(0.0, 1.0)
    fc3.bias.data.normal_(0.0, 1.0)
    fc4 = nn.Linear(10, 2)
    fc4.weight.data.normal_(0.0, 1.0)
    fc4.bias.data.normal_(0.0, 1.0)
    softmax = nn.Softmax()
    model0 = lambda x: F.log_softmax(fc2(F.relu(fc1(x))))
    model1 = lambda x: F.softmax(F.elu(fc3(x)))
    model2 = lambda x: F.softmax(F.tanh(fc3(x)))
    model3 = lambda x: F.softmax(F.sigmoid(fc3(x)))
    model4 = lambda x: softmax(F.leaky_relu(fc4(x))).clone()
    model5 = lambda x: softmax(F.logsigmoid(fc4(x.transpose(0, 1))))
    model6 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0), 2).squeeze())
    model7 = lambda x: fc3(F.max_pool2d(x.unsqueeze(dim=0), 2).squeeze(dim=0))
    model8 = lambda x: fc3(F.max_pool3d(x.unsqueeze(0), 2).squeeze())
    model9 = lambda x: fc3(F.max_pool1d(x.abs().view(1, 1, -1), 4).squeeze().view(10, 10))
    # model10 = lambda x: fc3(x.double())
    # model10 = lambda x: fc3(x.view(1, 10, 10).select(0, 0))
    model10 = lambda x, y: F.softmax(F.tanh(fc3(torch.cat((x, y), 1))))
    data = Variable(torch.rand(10, 10))
    data2 = Variable(torch.rand(20, 20))
    data1a = Variable(torch.rand(10, 5))
    data1b = Variable(torch.rand(10, 5))
    data3 = Variable(torch.rand(2, 20, 20))
    out = model0(data) + \
        model1(data) * model2(data) / model3(data) / 2.0 + \
        2.0 * model4(data) + model5(data) + 1 - 2.0 + \
        model6(data2) + model7(data2) + model8(data3) + model9(data2) + model10(data1a, data1b)
    out_path = 'out'
    if not os.path.isdir(out_path):
        os.mkdir(out_path)
    uid = str(uuid.uuid4())
    torch2c.compile(out, 'base', os.path.join(out_path, uid), compile_test=True)