This article collects typical usage examples of torch.nn.AdaptiveMaxPool2d in Python. If you are asking yourself how exactly nn.AdaptiveMaxPool2d works, how to use it, or what real-world examples look like, the curated code samples here may help. You can also explore further usage examples from its containing module, torch.nn.
Below are 13 code examples of nn.AdaptiveMaxPool2d, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
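As a baseline before the project-specific examples: nn.AdaptiveMaxPool2d takes only a target output size (an int for a square output, or an (H, W) tuple) and max-pools any input down to that spatial size, independent of the input resolution. A minimal sketch:

import torch
import torch.nn as nn

# Pool any (N, C, H, W) input down to a fixed spatial size.
pool = nn.AdaptiveMaxPool2d((5, 7))
x = torch.randn(1, 64, 32, 40)
print(pool(x).shape)  # torch.Size([1, 64, 5, 7])

# A single int means a square output; 1 gives global max pooling.
global_pool = nn.AdaptiveMaxPool2d(1)
print(global_pool(x).shape)  # torch.Size([1, 64, 1, 1])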
Example 1: add_flops_counter_hook_function
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def add_flops_counter_hook_function(module):
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            return  # a hook is already registered on this module
        # Pick the forward hook that matches the module type.
        if isinstance(module, torch.nn.Conv2d):
            handle = module.register_forward_hook(conv_flops_counter_hook)
        elif isinstance(module, (torch.nn.ReLU, torch.nn.PReLU, torch.nn.ELU,
                                 torch.nn.LeakyReLU, torch.nn.ReLU6)):
            handle = module.register_forward_hook(relu_flops_counter_hook)
        elif isinstance(module, torch.nn.Linear):
            handle = module.register_forward_hook(linear_flops_counter_hook)
        elif isinstance(module, (torch.nn.AvgPool2d, torch.nn.MaxPool2d,
                                 nn.AdaptiveMaxPool2d, nn.AdaptiveAvgPool2d)):
            handle = module.register_forward_hook(pool_flops_counter_hook)
        elif isinstance(module, torch.nn.BatchNorm2d):
            handle = module.register_forward_hook(bn_flops_counter_hook)
        elif isinstance(module, torch.nn.Upsample):
            handle = module.register_forward_hook(upsample_flops_counter_hook)
        else:
            handle = module.register_forward_hook(empty_flops_counter_hook)
        module.__flops_handle__ = handle
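The *_flops_counter_hook functions are defined elsewhere in the source project. As a rough illustration of the pooling case (an assumption about that project's internals, not code from it), such a hook typically counts one operation per input element:

import numpy as np

def pool_flops_counter_hook(module, input, output):
    # Pooling visits each input element once, so FLOPs are approximated
    # by the number of input elements (batch included). `__flops__` is
    # assumed to be initialized to 0 elsewhere in the counting framework.
    input = input[0]  # forward hooks receive the inputs as a tuple
    module.__flops__ += int(np.prod(input.shape))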
Example 2: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, owner, backbone, args, input_dim):
    super().__init__()
    self.owner = weakref.ref(owner)  # weak ref avoids a reference cycle with the owner
    self.input_dim = input_dim
    self.output_dim = args['global_dim']
    self.args = args
    self.num_classes = owner.num_classes
    self._init_fc_layer()
    # Note: the attribute is named `avgpool` in the source even when it
    # actually holds a global *max* pool.
    if args['global_max_pooling']:
        self.avgpool = nn.AdaptiveMaxPool2d(1)
    else:
        self.avgpool = nn.AdaptiveAvgPool2d(1)
    self._init_classifier()
Example 3: compute_flops
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def compute_flops(module, inp, out):
    # Dispatch to a per-type FLOPs routine; returns (flops, type_name).
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out), 'Conv2d'
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out), 'BatchNorm2d'
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_flops(module, inp, out), 'Pool2d'
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU,
                             nn.LeakyReLU, nn.Sigmoid)):
        return compute_ReLU_flops(module, inp, out), 'Activation'
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out), 'Upsample'
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out), 'Linear'
    else:
        print("[Flops]: {} is not supported!".format(type(module).__name__))
        return 0, -1
Example 4: compute_memory
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def compute_memory(module, inp, out):
    # Dispatch to a per-type memory-estimation routine.
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d,
                             nn.AdaptiveAvgPool2d, nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
Example 5: create_and_append_layer
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def create_and_append_layer(self, input_dim, layer, list_to_append_layer_to):
    """Creates and appends a layer to the list provided"""
    layer_name = layer[0].lower()
    assert layer_name in self.valid_cnn_hidden_layer_types, "Layer name {} not valid, use one of {}".format(
        layer_name, self.valid_cnn_hidden_layer_types)
    if layer_name == "conv":
        list_to_append_layer_to.extend([nn.Conv2d(in_channels=input_dim[0], out_channels=layer[1],
                                                  kernel_size=layer[2], stride=layer[3], padding=layer[4])])
    elif layer_name == "maxpool":
        list_to_append_layer_to.extend([nn.MaxPool2d(kernel_size=layer[1],
                                                     stride=layer[2], padding=layer[3])])
    elif layer_name == "avgpool":
        list_to_append_layer_to.extend([nn.AvgPool2d(kernel_size=layer[1],
                                                     stride=layer[2], padding=layer[3])])
    elif layer_name == "adaptivemaxpool":
        list_to_append_layer_to.extend([nn.AdaptiveMaxPool2d(output_size=(layer[1], layer[2]))])
    elif layer_name == "adaptiveavgpool":
        list_to_append_layer_to.extend([nn.AdaptiveAvgPool2d(output_size=(layer[1], layer[2]))])
    elif layer_name == "linear":
        # Flatten a (C, H, W) shape into a single feature count.
        if isinstance(input_dim, tuple):
            input_dim = np.prod(np.array(input_dim))
        list_to_append_layer_to.extend([nn.Linear(in_features=input_dim, out_features=layer[1])])
    else:
        raise ValueError("Wrong layer name")
    input_dim = self.calculate_new_dimensions(input_dim, layer)
    return input_dim
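For illustration, the `layer` specs this helper parses follow the positional format below (the concrete values here are hypothetical, chosen only to match the branches above):

# Hypothetical layer specs consumed by create_and_append_layer:
# conv:            ["conv", out_channels, kernel_size, stride, padding]
# maxpool/avgpool: ["maxpool", kernel_size, stride, padding]
# adaptive pools:  ["adaptivemaxpool", out_h, out_w]
# linear:          ["linear", out_features]
example_layers = [
    ["conv", 32, 3, 1, 1],
    ["maxpool", 2, 2, 0],
    ["adaptivemaxpool", 7, 7],
    ["linear", 10],
]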
Example 6: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, PS=28):
    super(YiNet, self).__init__()
    self.features = nn.Sequential(
        nn.Conv2d(1, 10, kernel_size=5, padding=0, bias=True),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(10, 20, kernel_size=5, stride=1, padding=0, bias=True),
        nn.ReLU(),
        nn.MaxPool2d(kernel_size=4, stride=2, padding=2),
        nn.Conv2d(20, 50, kernel_size=3, stride=1, padding=0, bias=True),
        nn.ReLU(),
        # Collapse the spatial dimensions to 1x1 regardless of patch size.
        nn.AdaptiveMaxPool2d(1),
        GHH(50, 100),
        GHH(100, 2)
    )
    self.input_mean = 0.427117081207483
    self.input_std = 0.21888339179665006
    self.PS = PS
Example 7: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, image_size=128, c_dim=5, net_type='vgg19', max_filters=None,
             global_pool='mean', use_bias=False, class_ftune=0):
    super(DiscriminatorGAP_ImageNet, self).__init__()
    layers = []
    nFilt = 512 if max_filters is None else max_filters
    self.pnet = Vgg19(only_last=True) if net_type == 'vgg19' else None
    if class_ftune > 0.:
        pAll = list(self.pnet.named_parameters())
        # Multiply by two for weight and bias: unfreeze the last
        # class_ftune layers of the backbone.
        for pn in pAll[::-1][:2 * class_ftune]:
            pn[1].requires_grad = True
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(512, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    layers.append(nn.Conv2d(nFilt, nFilt, kernel_size=3, stride=1, padding=1))
    layers.append(nn.BatchNorm2d(nFilt))
    layers.append(nn.LeakyReLU(0.1, inplace=True))
    self.layers = nn.Sequential(*layers)
    self.globalPool = nn.AdaptiveAvgPool2d(1) if global_pool == 'mean' else nn.AdaptiveMaxPool2d(1)
    self.classifyFC = nn.Linear(nFilt, c_dim, bias=use_bias)
    # torch.autograd.Variable is a no-op wrapper in modern PyTorch; kept as in the source.
    self.shift = torch.autograd.Variable(torch.Tensor([-.030, -.088, -.188]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.scale = torch.autograd.Variable(torch.Tensor([.458, .448, .450]).view(1, 3, 1, 1), requires_grad=False).cuda()
    self.c_dim = c_dim
Example 8: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, opt):
    super(Classifier, self).__init__()
    nChannels = opt['nChannels']
    num_classes = opt['num_classes']
    pool_size = opt['pool_size']
    pool_type = opt['pool_type'] if ('pool_type' in opt) else 'max'
    nChannelsAll = nChannels * pool_size * pool_size
    self.classifier = nn.Sequential()
    if pool_type == 'max':
        self.classifier.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size)))
    elif pool_type == 'avg':
        self.classifier.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size)))
    self.classifier.add_module('BatchNorm', nn.BatchNorm2d(nChannels, affine=False))
    self.classifier.add_module('Flatten', Flatten())
    # Note: 'LiniearClassifier' and initilize() keep the source project's spelling.
    self.classifier.add_module('LiniearClassifier', nn.Linear(nChannelsAll, num_classes))
    self.initilize()
Example 9: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, cfg):
    super().__init__()
    self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(1, -1, 1, 1))
    self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(1, -1, 1, 1))
    self._cfg = cfg
    # backbone
    self.backbone = build_backbone(cfg)
    # head
    pool_type = cfg.MODEL.HEADS.POOL_LAYER
    if pool_type == 'avgpool':
        pool_layer = FastGlobalAvgPool2d()
    elif pool_type == 'maxpool':
        pool_layer = nn.AdaptiveMaxPool2d(1)
    elif pool_type == 'gempool':
        pool_layer = GeneralizedMeanPoolingP()
    elif pool_type == "avgmaxpool":
        pool_layer = AdaptiveAvgMaxPool2d()
    elif pool_type == "identity":
        pool_layer = nn.Identity()
    else:
        raise KeyError(f"{pool_type} is invalid, please choose from "
                       f"'avgpool', 'maxpool', 'gempool', 'avgmaxpool' and 'identity'.")
    in_feat = cfg.MODEL.HEADS.IN_FEAT
    num_classes = cfg.MODEL.HEADS.NUM_CLASSES
    self.heads = build_reid_heads(cfg, in_feat, num_classes, pool_layer)
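FastGlobalAvgPool2d, GeneralizedMeanPoolingP, AdaptiveAvgMaxPool2d, and the build_* helpers come from the surrounding re-ID project and are not shown here. For orientation, a minimal sketch of generalized-mean (GeM) pooling with a learnable exponent p, which is the common formulation behind names like GeneralizedMeanPoolingP (an assumption about this project's version):

import torch
import torch.nn as nn
import torch.nn.functional as F

class GeneralizedMeanPooling(nn.Module):
    """GeM pooling: (mean(x^p))^(1/p). p=1 is average pooling;
    p -> infinity approaches max pooling. Here p is learnable."""

    def __init__(self, p=3.0, eps=1e-6):
        super().__init__()
        self.p = nn.Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        # Clamp to keep the fractional power well-defined and differentiable.
        x = x.clamp(min=self.eps).pow(self.p)
        return F.adaptive_avg_pool2d(x, 1).pow(1.0 / self.p)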
Example 10: test_modify_pool
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def test_modify_pool(net, img_size):
    """Test ability to modify pooling module of network"""
    class AdaptiveMaxAvgPool(nn.Module):
        def __init__(self):
            super().__init__()
            self.ada_avgpool = nn.AdaptiveAvgPool2d(1)
            self.ada_maxpool = nn.AdaptiveMaxPool2d(1)

        def forward(self, x):
            # Concatenate global average and max descriptors along channels.
            avg_x = self.ada_avgpool(x)
            max_x = self.ada_maxpool(x)
            x = torch.cat((avg_x, max_x), dim=1)
            return x

    avg_pooling = AdaptiveMaxAvgPool()
    # The concatenation doubles the feature dimension feeding the classifier.
    fc = nn.Linear(net._fc.in_features * 2, net._global_params.num_classes)
    net._avg_pooling = avg_pooling
    net._fc = fc
    data = torch.zeros((2, 3, img_size, img_size))
    output = net(data)
    assert not torch.isnan(output).any()
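The private attributes `_avg_pooling`, `_fc`, and `_global_params` suggest this test targets efficientnet_pytorch-style models; a hypothetical invocation (an assumption, not part of the source) might be:

from efficientnet_pytorch import EfficientNet

net = EfficientNet.from_name('efficientnet-b0')  # exposes _avg_pooling, _fc, _global_params
test_modify_pool(net, 224)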
Example 11: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, gpu=None):
    super(Embedder, self).__init__()
    self.conv1 = ResidualBlockDown(6, 64)
    self.conv2 = ResidualBlockDown(64, 128)
    self.conv3 = ResidualBlockDown(128, 256)
    self.att = SelfAttention(256)
    self.conv4 = ResidualBlockDown(256, 512)
    self.conv5 = ResidualBlockDown(512, 512)
    self.conv6 = ResidualBlockDown(512, 512)
    # Global max pooling down to a 1x1 map, i.e. a 512-d embedding.
    self.pooling = nn.AdaptiveMaxPool2d((1, 1))
    self.apply(weights_init)
    self.gpu = gpu
    if gpu is not None:
        self.cuda(gpu)
Example 12: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, in_channels, out_channels, bias=True, nonlinear=True):
    super(AttentionLayer, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    self.softmax = nn.Softmax(dim=1)
    self.w0 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.w1 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.w2 = nn.Parameter(torch.ones(in_channels, 1), requires_grad=True)
    self.bias0 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    self.bias1 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    self.bias2 = nn.Parameter(torch.zeros(1, in_channels, 1, 1), requires_grad=True)
    nn.init.xavier_uniform_(self.w0)
    nn.init.xavier_uniform_(self.w1)
    nn.init.xavier_uniform_(self.w2)
    # self.tanh = nn.Tanh()
Example 13: __init__
# Required module import: from torch import nn [as alias]
# Or: from torch.nn import AdaptiveMaxPool2d [as alias]
def __init__(self, in_planes, ratio=16):
    super(ChannelAttention, self).__init__()
    self.avg_pool = nn.AdaptiveAvgPool2d(1)
    self.max_pool = nn.AdaptiveMaxPool2d(1)
    # Shared 1x1-conv bottleneck acting as the channel-attention MLP.
    self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
    self.relu1 = nn.ReLU()
    self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
    self.sigmoid = nn.Sigmoid()
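The snippet shows only the constructor. A minimal sketch of the usual forward pass for this kind of channel attention (the CBAM formulation; an assumption, since the source's forward is not shown): both global descriptors pass through the shared fc1/fc2 bottleneck, are summed, and gated with the sigmoid.

def forward(self, x):
    # Shared MLP over the average- and max-pooled descriptors, then gate.
    avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
    max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
    return self.sigmoid(avg_out + max_out)  # per-channel attention weights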