This article collects and summarizes typical usage examples of the Python method torch.nn.functional.adaptive_max_pool2d. If you are unsure what functional.adaptive_max_pool2d does, how to call it, or how it is used in practice, the curated code samples below may help. You can also browse further usage examples of its containing module, torch.nn.functional.
The following shows 15 code examples of functional.adaptive_max_pool2d, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
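Before the examples, here is a minimal, self-contained sketch of the basic API (the tensor shapes are made up): adaptive_max_pool2d takes an input of shape (N, C, H, W) (a 3D (C, H, W) input also works) and an output_size, and max-pools each channel to exactly that spatial size, regardless of the input resolution.

import torch
import torch.nn.functional as F

x = torch.randn(2, 64, 37, 53)                   # arbitrary input resolution
y = F.adaptive_max_pool2d(x, output_size=(7, 7))
print(y.shape)                                   # torch.Size([2, 64, 7, 7])

# An int output_size is shorthand for a square output; 1 gives global max pooling.
z = F.adaptive_max_pool2d(x, 1)
print(z.shape)                                   # torch.Size([2, 64, 1, 1])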
Example 1: apply
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def apply(features: Tensor, proposal_bboxes: Tensor, proposal_batch_indices: Tensor, mode: Mode) -> Tensor:
    _, _, feature_map_height, feature_map_width = features.shape
    scale = 1 / 16
    output_size = (7 * 2, 7 * 2)
    if mode == Pooler.Mode.POOLING:
        pool = []
        for (proposal_bbox, proposal_batch_index) in zip(proposal_bboxes, proposal_batch_indices):
            start_x = max(min(round(proposal_bbox[0].item() * scale), feature_map_width - 1), 0)   # [0, feature_map_width)
            start_y = max(min(round(proposal_bbox[1].item() * scale), feature_map_height - 1), 0)  # [0, feature_map_height)
            end_x = max(min(round(proposal_bbox[2].item() * scale) + 1, feature_map_width), 1)     # (0, feature_map_width]
            end_y = max(min(round(proposal_bbox[3].item() * scale) + 1, feature_map_height), 1)    # (0, feature_map_height]
            roi_feature_map = features[proposal_batch_index, :, start_y:end_y, start_x:end_x]
            pool.append(F.adaptive_max_pool2d(input=roi_feature_map, output_size=output_size))
        pool = torch.stack(pool, dim=0)
    elif mode == Pooler.Mode.ALIGN:
        pool = ROIAlign(output_size, spatial_scale=scale, sampling_ratio=0)(
            features,
            torch.cat([proposal_batch_indices.view(-1, 1).float(), proposal_bboxes], dim=1)
        )
    else:
        raise ValueError
    pool = F.max_pool2d(input=pool, kernel_size=2, stride=2)
    return pool
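To illustrate the POOLING branch above in isolation, the following sketch (with a made-up feature map and a single proposal box in image coordinates, so Pooler.Mode and ROIAlign are not needed) crops the corresponding region from the feature map, pools it to a fixed 14x14 grid with adaptive_max_pool2d, and reduces it to 7x7 with the final 2x2 max pool:

import torch
import torch.nn.functional as F

features = torch.randn(1, 256, 50, 60)             # (N, C, H, W) backbone feature map
bbox = torch.tensor([120.0, 80.0, 400.0, 300.0])   # one proposal (x1, y1, x2, y2) in image coordinates
scale = 1 / 16

start_x = max(min(round(bbox[0].item() * scale), 60 - 1), 0)
start_y = max(min(round(bbox[1].item() * scale), 50 - 1), 0)
end_x = max(min(round(bbox[2].item() * scale) + 1, 60), 1)
end_y = max(min(round(bbox[3].item() * scale) + 1, 50), 1)

roi = features[0, :, start_y:end_y, start_x:end_x]   # (C, h, w) crop of variable size
pooled = F.adaptive_max_pool2d(roi, (7 * 2, 7 * 2))  # (C, 14, 14) regardless of crop size
pooled = F.max_pool2d(pooled, kernel_size=2, stride=2)
print(pooled.shape)                                  # torch.Size([256, 7, 7])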
Example 2: __init__
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def __init__(
    self, max_disp, start_disp=0,
    dilation=1, weights=None,
    focal_coefficient=0.0,
    sparse=False
):
    self.max_disp = max_disp
    self.start_disp = start_disp
    self.end_disp = self.max_disp + self.start_disp - 1
    self.dilation = dilation
    self.weights = weights
    self.focal_coefficient = focal_coefficient
    self.sparse = sparse
    if sparse:
        # sparse disparity ==> max_pooling
        self.scale_func = F.adaptive_max_pool2d
    else:
        # dense disparity ==> avg_pooling
        self.scale_func = F.adaptive_avg_pool2d
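The sparse/dense switch above matters when a ground-truth disparity map has to be downsampled to match a coarser prediction: in a sparse map, zeros mark missing pixels, so average pooling would dilute the few valid values with zeros, whereas max pooling keeps them. A small sketch of that effect on a made-up 1x1x4x4 disparity map:

import torch
import torch.nn.functional as F

# Sparse disparity: only a few pixels carry a valid (positive) value.
disp = torch.zeros(1, 1, 4, 4)
disp[0, 0, 1, 1] = 12.0
disp[0, 0, 2, 3] = 8.0

avg = F.adaptive_avg_pool2d(disp, (2, 2))   # valid values get averaged with zeros (12 -> 3.0)
mx = F.adaptive_max_pool2d(disp, (2, 2))    # valid values survive (12.0 and 8.0 kept)
print(avg)
print(mx)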
Example 3: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x):
    f, class_f = self.feats(x)
    p = self.psp(f)
    p = self.drop_1(p)
    p = self.up_1(p)
    p = self.drop_2(p)
    p = self.up_2(p)
    p = self.drop_2(p)
    p = self.up_3(p)
    p = self.drop_2(p)
    auxiliary = F.adaptive_max_pool2d(input=class_f, output_size=(1, 1)).view(-1, class_f.size(1))
    return self.final(p), self.classifier(auxiliary)
Example 4: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, inputs):
    scaled_inputs = None
    if self.scale:
        scaled_inputs = func.max_pool2d(inputs, self.scale)
    elif self.size:
        scaled_inputs = func.adaptive_max_pool2d(inputs, self.size)
    else:
        scaled_inputs = inputs
    query = self.query(inputs).view(inputs.size(0), self.attention_size, -1)
    key = self.key(scaled_inputs).view(scaled_inputs.size(0), self.attention_size, -1)
    value = self.value(scaled_inputs).view(scaled_inputs.size(0), self.attention_size, -1)
    key = key.permute(0, 2, 1)
    assignment = (key @ query).softmax(dim=1)
    result = value @ assignment
    result = result.view(inputs.size(0), self.attention_size, *inputs.shape[2:])
    return self.project(result) + inputs
Example 5: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x, feature=False):
    features = self.features(x)
    out = F.relu(features, inplace=True)
    out = F.adaptive_max_pool2d(out, (1, 1)).view(features.size(0), -1)
    # out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
    if self.classifier is None:
        if feature:
            return out, None
        else:
            return out
    if feature:
        out1 = self.classifier(out)
        return out, out1
    out = self.classifier(out)
    return out
Example 6: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x):
    s0 = s1 = self.stem(x)
    pre_layers = [s1]
    for i, cell in enumerate(self.cells):
        weights = []
        n = 2
        start = 0
        for _ in range(self._steps):
            end = start + n
            for j in range(start, end):
                weights.append(F.softmax(self.alphas_normal[j], dim=-1))
            start = end
            n += 1
        selected_idxs = self.normal_selected_idxs
        s0, s1 = s1, cell(s0, s1, weights, selected_idxs)
        pre_layers.append(s1)
    fusion = torch.cat(pre_layers, dim=1)
    fusion = self.fusion_conv(fusion)
    x1 = F.adaptive_max_pool2d(fusion, 1)
    x2 = F.adaptive_avg_pool2d(fusion, 1)
    logits = self.classifier(torch.cat((x1, x2), dim=1))
    return logits.squeeze(-1).squeeze(-1)
Example 7: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x):
    logits_aux = None
    s0 = s1 = self.stem(x)
    pre_layers = [s1]
    for i, cell in enumerate(self.cells):
        s0, s1 = s1, cell(s0, s1, self.drop_path_prob)
        pre_layers.append(s1)
        if i == 2 * self._layers // 3:
            if self._auxiliary and self.training:
                logits_aux = self.auxiliary_head(s1).squeeze(-1).squeeze(-1)
    fusion = torch.cat(pre_layers, dim=1)
    fusion = self.fusion_conv(fusion)
    x1 = F.adaptive_max_pool2d(fusion, 1)
    x2 = F.adaptive_avg_pool2d(fusion, 1)
    logits = self.classifier(torch.cat((x1, x2), dim=1)).squeeze(-1).squeeze(-1)
    return logits, logits_aux
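Examples 6 and 7 end with the same classification head: the fused feature map is reduced by global max pooling and global average pooling, the two (N, C, 1, 1) results are concatenated along the channel dimension, and a classifier maps them to logits. A standalone sketch of just that head, with a made-up fused feature map and the classifier assumed to be a 1x1 convolution:

import torch
import torch.nn as nn
import torch.nn.functional as F

fusion = torch.randn(8, 512, 14, 14)                  # made-up fused feature map
classifier = nn.Conv2d(512 * 2, 10, kernel_size=1)    # assumed 1x1 conv head, 10 classes

x1 = F.adaptive_max_pool2d(fusion, 1)                 # (8, 512, 1, 1)
x2 = F.adaptive_avg_pool2d(fusion, 1)                 # (8, 512, 1, 1)
logits = classifier(torch.cat((x1, x2), dim=1)).squeeze(-1).squeeze(-1)
print(logits.shape)                                   # torch.Size([8, 10])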
Example 8: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, inputs):
    """Forward function."""
    assert len(inputs) == self.num_levels
    # step 1: gather multi-level features by resize and average
    feats = []
    gather_size = inputs[self.refine_level].size()[2:]
    for i in range(self.num_levels):
        if i < self.refine_level:
            gathered = F.adaptive_max_pool2d(
                inputs[i], output_size=gather_size)
        else:
            gathered = F.interpolate(
                inputs[i], size=gather_size, mode='nearest')
        feats.append(gathered)
    bsf = sum(feats) / len(feats)
    # step 2: refine gathered features
    if self.refine_type is not None:
        bsf = self.refine(bsf)
    # step 3: scatter refined features to multi-levels by a residual path
    outs = []
    for i in range(self.num_levels):
        out_size = inputs[i].size()[2:]
        if i < self.refine_level:
            residual = F.interpolate(bsf, size=out_size, mode='nearest')
        else:
            residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
        outs.append(residual + inputs[i])
    return tuple(outs)
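The gather step above brings every pyramid level to the resolution of the refine_level: finer levels are downsampled with adaptive_max_pool2d and coarser levels are upsampled with nearest interpolation, so the levels can simply be averaged. A minimal sketch of that gather with four made-up feature levels and refine_level = 1:

import torch
import torch.nn.functional as F

inputs = [torch.randn(1, 256, 64, 64),   # made-up FPN-like levels
          torch.randn(1, 256, 32, 32),
          torch.randn(1, 256, 16, 16),
          torch.randn(1, 256, 8, 8)]
refine_level = 1
gather_size = inputs[refine_level].size()[2:]          # torch.Size([32, 32])

feats = []
for i, feat in enumerate(inputs):
    if i < refine_level:
        feats.append(F.adaptive_max_pool2d(feat, output_size=gather_size))
    else:
        feats.append(F.interpolate(feat, size=gather_size, mode='nearest'))

bsf = sum(feats) / len(feats)
print(bsf.shape)                                       # torch.Size([1, 256, 32, 32])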
Example 9: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, inputs):
    assert len(inputs) == self.num_levels
    # step 1: gather multi-level features by resize and average
    feats = []
    gather_size = inputs[self.refine_level].size()[2:]
    for i in range(self.num_levels):
        if i < self.refine_level:
            gathered = F.adaptive_max_pool2d(
                inputs[i], output_size=gather_size)
        else:
            gathered = F.interpolate(
                inputs[i], size=gather_size, mode='nearest')
        feats.append(gathered)
    bsf = sum(feats) / len(feats)
    # step 2: refine gathered features
    if self.refine_type is not None:
        bsf = self.refine(bsf)
    # step 3: scatter refined features to multi-levels by a residual path
    outs = []
    for i in range(self.num_levels):
        out_size = inputs[i].size()[2:]
        if i < self.refine_level:
            residual = F.interpolate(bsf, size=out_size, mode='nearest')
        else:
            residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
        outs.append(residual + inputs[i])
    return tuple(outs)
Example 10: __init__
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def __init__(self, max_disp, start_disp=0, weights=None, sparse=False):
    self.max_disp = max_disp
    self.weights = weights
    self.start_disp = start_disp
    self.sparse = sparse
    if sparse:
        # sparse disparity ==> max_pooling
        self.scale_func = F.adaptive_max_pool2d
    else:
        # dense disparity ==> avg_pooling
        self.scale_func = F.adaptive_avg_pool2d
Example 11: __init__
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def __init__(self, max_disp, start_disp=0, weights=None, sparse=False):
    self.max_disp = max_disp
    self.start_disp = start_disp
    self.weights = weights
    self.sparse = sparse
    if sparse:
        # sparse disparity ==> max_pooling
        self.scale_func = F.adaptive_max_pool2d
    else:
        # dense disparity ==> avg_pooling
        self.scale_func = F.adaptive_avg_pool2d
Example 12: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x, l):
    """
    :param x: Input sequence.
    :param l: Index of the current convolutional layer.
    """
    s = x.size()[3]
    k_ll = ((self.L - l) / self.L) * s
    k_l = int(round(max(self.k_top, np.ceil(k_ll))))
    out = F.adaptive_max_pool2d(x, (x.size()[2], k_l))
    return out
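Example 12 uses adaptive_max_pool2d for dynamic k-max pooling over a sequence stored in the last dimension of a 4D tensor: the height is kept and only the width is pooled down to k_l, which shrinks as the layer index l approaches the network depth L. A sketch of the same formula with made-up values (L = 3, k_top = 4, l = 1):

import numpy as np
import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 32, 50)      # (batch, channels, feature dim, sequence length)
L, k_top, l = 3, 4, 1               # made-up depth, minimum k, current layer index

k_ll = ((L - l) / L) * x.size(3)    # 2/3 of the sequence length = 33.3
k_l = int(round(max(k_top, np.ceil(k_ll))))
out = F.adaptive_max_pool2d(x, (x.size(2), k_l))
print(k_l, out.shape)               # 34 torch.Size([2, 16, 32, 34])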
Example 13: __getitem__
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def __getitem__(self, index):
    position = self.indices[index]
    img, _ = self.data[position]
    img = torch.tensor(np.array(img)).permute(2, 0, 1).to(torch.float) / 255
    edge = img[:, :, :256].unsqueeze(0)
    shoe = img[:, :, 256:].unsqueeze(0)
    edge = func.adaptive_max_pool2d(1 - edge, (28, 28))
    shoe = func.adaptive_avg_pool2d(shoe, (28, 28))
    return edge[0], shoe[0]
Example 14: roi_pooling
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def roi_pooling(input, rois, size=(7, 7), spatial_scale=1.0):
    assert (rois.dim() == 2)
    assert (rois.size(1) == 5)
    output = []
    rois = rois.data.float()
    num_rois = rois.size(0)
    rois[:, 1:].mul_(spatial_scale)
    rois = rois.long()
    for i in range(num_rois):
        roi = rois[i]
        im_idx = roi[0]
        if roi[1] >= input.size(3) or roi[2] >= input.size(2) or roi[1] < 0 or roi[2] < 0:
            # print(f"Runtime Warning: roi top left corner out of range: {roi}", file=sys.stderr)
            roi[1] = torch.clamp(roi[1], 0, input.size(3) - 1)
            roi[2] = torch.clamp(roi[2], 0, input.size(2) - 1)
        if roi[3] >= input.size(3) or roi[4] >= input.size(2) or roi[3] < 0 or roi[4] < 0:
            # print(f"Runtime Warning: roi bottom right corner out of range: {roi}", file=sys.stderr)
            roi[3] = torch.clamp(roi[3], 0, input.size(3) - 1)
            roi[4] = torch.clamp(roi[4], 0, input.size(2) - 1)
        if (roi[3:5] - roi[1:3] < 0).any():
            # print(f"Runtime Warning: invalid roi: {roi}", file=sys.stderr)
            im = input.new_full((1, input.size(1), 1, 1), 0)
        else:
            im = input.narrow(0, im_idx, 1)[..., roi[2]:(roi[4] + 1), roi[1]:(roi[3] + 1)]
        output.append(F.adaptive_max_pool2d(im, size))
    return torch.cat(output, 0)
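The roi_pooling helper above expects rois as an (R, 5) tensor whose columns are (batch_index, x1, y1, x2, y2); the box coordinates are multiplied by spatial_scale to map them into the feature map's coordinate frame. A usage sketch with a random feature map and two made-up boxes, assuming the function above is in scope:

import torch

feat = torch.randn(2, 256, 38, 50)                   # (N, C, H, W) feature map
rois = torch.tensor([[0.0,  10.0,  12.0, 200.0, 180.0],
                     [1.0,   0.0,   0.0, 320.0, 240.0]])

# With spatial_scale=1/16 the boxes are mapped from image to feature coordinates.
pooled = roi_pooling(feat, rois, size=(7, 7), spatial_scale=1.0 / 16)
print(pooled.shape)                                  # torch.Size([2, 256, 7, 7])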
Example 15: forward
# Required import: from torch.nn import functional [as an alias]
# Or: from torch.nn.functional import adaptive_max_pool2d [as an alias]
def forward(self, x):
    b, c = x.size()[:2]
    aa = F.adaptive_avg_pool2d(x, 1).view(b, c)
    aa = self.mlp(aa)
    am = F.adaptive_max_pool2d(x, 1).view(b, c)
    am = self.mlp(am)
    a = torch.sigmoid(aa + am).view(b, c, 1, 1)
    return x * a
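Example 15 is a channel-attention block in the CBAM style: global average pooling and global max pooling each produce a (B, C) descriptor, a shared MLP scores both, and the sigmoid of the sum rescales the input channels. A self-contained functional sketch with a made-up shared MLP (reduction ratio 16):

import torch
import torch.nn as nn
import torch.nn.functional as F

channels = 64
mlp = nn.Sequential(nn.Linear(channels, channels // 16),
                    nn.ReLU(inplace=True),
                    nn.Linear(channels // 16, channels))

x = torch.randn(4, channels, 32, 32)
b, c = x.size()[:2]
aa = mlp(F.adaptive_avg_pool2d(x, 1).view(b, c))    # score from average-pooled descriptor
am = mlp(F.adaptive_max_pool2d(x, 1).view(b, c))    # score from max-pooled descriptor
a = torch.sigmoid(aa + am).view(b, c, 1, 1)         # per-channel attention weights
out = x * a
print(out.shape)                                    # torch.Size([4, 64, 32, 32])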