

Python nn.ReLU6 Method Code Examples

This article collects typical usage examples of the torch.nn.ReLU6 method in Python. If you are wondering how nn.ReLU6 works, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the torch.nn module.


Below are 15 code examples of the nn.ReLU6 method, sorted by popularity by default.
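
Before the examples, a quick illustration of what nn.ReLU6 computes may help: it clamps its input to the range [0, 6], i.e. y = min(max(0, x), 6). The following minimal snippet is illustrative only and is not taken from any of the projects cited below.

import torch
import torch.nn as nn

# ReLU6 clamps activations to [0, 6]: y = min(max(0, x), 6).
act = nn.ReLU6()
x = torch.tensor([-3.0, 0.0, 2.5, 6.0, 9.0])
print(act(x))                    # tensor([0.0000, 0.0000, 2.5000, 6.0000, 6.0000])
print(torch.clamp(x, 0.0, 6.0))  # same result via an explicit clamp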

Example 1: compute_madd

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0 
Author: Tramac, Project: torchscope, Lines: 23, Source: helper.py
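
In practice a dispatcher like compute_madd is usually driven by forward hooks. The sketch below shows that general pattern; the stat function here is a hypothetical placeholder (it just counts output elements), and the integration is an assumption rather than torchscope's actual plumbing -- in real use you would pass compute_madd instead.

import torch
import torch.nn as nn

def attach_stat_hooks(model, stats, stat_fn):
    # Register a forward hook on each module of interest; the hook records
    # stat_fn(module, input, output) under the module's name.
    handles = []
    for name, module in model.named_modules():
        if isinstance(module, (nn.Conv2d, nn.ReLU, nn.ReLU6, nn.Linear)):
            def hook(mod, inp, out, name=name):
                stats[name] = stat_fn(mod, inp[0], out)
            handles.append(module.register_forward_hook(hook))
    return handles

stats = {}
model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU6(inplace=True))
handles = attach_stat_hooks(model, stats, lambda m, i, o: o.numel())
model(torch.randn(1, 3, 32, 32))
for h in handles:
    h.remove()
print(stats)  # per-layer element counts; swap in compute_madd for MAdd counts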

Example 2: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, inp, oup, stride, expand_ratio):
    super(_inverted_residual_bottleneck, self).__init__()
    self.use_res_connect = stride == 1 and inp == oup
    self.conv = nn.Sequential(
        # pw
        nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
        nn.BatchNorm2d(inp * expand_ratio),
        nn.ReLU6(inplace=True),
        # dw
        nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),
        nn.BatchNorm2d(inp * expand_ratio),
        nn.ReLU6(inplace=True),
        # pw-linear
        nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
    )
    self.depth = oup
Author: ShuangXieIrene, Project: ssds.pytorch, Lines: 19, Source: mobilenet.py
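
The snippet above shows only the constructor; the matching forward pass is omitted. In MobileNetV2-style blocks it typically looks like the sketch below, which is inferred from the use_res_connect flag rather than copied from ssds.pytorch:

def forward(self, x):
    # Apply the skip connection only when stride == 1 and inp == oup.
    if self.use_res_connect:
        return x + self.conv(x)
    return self.conv(x)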

Example 3: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, inplanes, outplanes, stride=1, dilation=1, kernel=3, groups=(1, 1), t=6, norm='bn', se_ratio=0,
             activation=nn.ReLU6):
    super(LinearBottleneck, self).__init__()
    padding = (dilation * kernel - dilation) // 2
    self.stride = stride
    self.inplanes, self.outplanes, innerplanes = int(inplanes), int(outplanes), int(inplanes * abs(t))
    self.t = t
    if self.t != 1:
        self.conv1 = nn.Conv2d(self.inplanes, innerplanes, kernel_size=1, padding=0, stride=1, groups=groups[0],
                               bias=False)
        self.bn1 = make_norm(innerplanes, norm=norm)
    self.conv2 = nn.Conv2d(innerplanes, innerplanes, kernel_size=kernel, padding=padding, stride=stride,
                           dilation=dilation, groups=innerplanes, bias=False)
    self.bn2 = make_norm(innerplanes, norm=norm)
    self.se = ops.SeConv2d(innerplanes, int(self.inplanes * se_ratio), activation) if se_ratio else None
    self.conv3 = nn.Conv2d(innerplanes, self.outplanes, kernel_size=1, padding=0, stride=1, groups=groups[1],
                           bias=False)
    self.bn3 = make_norm(self.outplanes, norm=norm)
    try:
        self.activation = activation(inplace=True)
    except TypeError:  # some activation constructors do not accept `inplace`
        self.activation = activation()
Author: soeaver, Project: Parsing-R-CNN, Lines: 24, Source: mobilenet_v2.py
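
The try/except at the end exists because not every activation constructor accepts the inplace keyword. An illustrative check (not from Parsing-R-CNN):

import torch.nn as nn

nn.ReLU6(inplace=True)  # fine: ReLU6 supports in-place operation
nn.PReLU()              # fine, but nn.PReLU(inplace=True) raises TypeError,
                        # which is why the constructor falls back to activation()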

Example 4: add_conv

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    """
    Add a conv2d / batchnorm / leaky ReLU block.
    Args:
        in_ch (int): number of input channels of the convolution layer.
        out_ch (int): number of output channels of the convolution layer.
        ksize (int): kernel size of the convolution layer.
        stride (int): stride of the convolution layer.
    Returns:
        stage (Sequential) : Sequential layers composing a convolution block.
    """
    stage = nn.Sequential()
    pad = (ksize - 1) // 2
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch,
                                       out_channels=out_ch, kernel_size=ksize, stride=stride,
                                       padding=pad, bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage 
Author: ruinmessi, Project: ASFF, Lines: 24, Source: network_blocks.py
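
A quick usage check for add_conv (shapes chosen arbitrarily; leaky=False selects the ReLU6 branch):

import torch

block = add_conv(in_ch=3, out_ch=16, ksize=3, stride=2, leaky=False)
y = block(torch.randn(1, 3, 64, 64))
print(y.shape)  # torch.Size([1, 16, 32, 32]) -- stride 2 halves the spatial dims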

Example 5: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, width_mult=1, activation=nn.ReLU6(), bias=False, add_sece=False, add_partial=False,
             image_channel=3):

    super(MobileNetV2, self).__init__()
    self.add_partial = add_partial
    # self.conv_block = Conv_block
    self.res_block = InvertedResidual if not add_partial else PartialInvertedResidual
    self.act_fn = activation
    self.bias = bias
    self.width_mult = width_mult
    self.out_stride = 32  # 1/32 of input size
    self.image_channel = image_channel
    self.inverted_residual_setting = [
        # t (expansion), c (channels), n (repeats), s (stride), dial (dilation)
        [1, 16, 1, 1, 1],
        [6, 24, 2, 2, 1],
        [6, 32, 3, 2, 1],
        [6, 64, 4, 2, 1],
        [6, 96, 3, 1, 1],
        [6, 160, 3, 2, 1],
        [6, 320, 1, 1, 1],
    ]
    self.last_channel = 0  # last one is avg pool
    self.features = self.make_inverted_resblocks(self.inverted_residual_setting, add_sece)
Author: yu45020, Project: Text_Segmentation_Image_Inpainting, Lines: 26, Source: MobileNetV2.py

Example 6: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, inp, oup, stride, expand_ratio):
    super(InvertedResidual, self).__init__()
    self.stride = stride

    self.use_res_connect = self.stride == 1 and inp == oup

    self.conv = nn.Sequential(
        # pw
        nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
        nn.BatchNorm2d(inp * expand_ratio),
        nn.ReLU6(inplace=True),
        # dw
        nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),
        nn.BatchNorm2d(inp * expand_ratio),
        nn.ReLU6(inplace=True),
        # pw-linear
        nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
    )
Author: ericsun99, Project: MobileNet-V2-Pytorch, Lines: 21, Source: MobileNetV2.py

Example 7: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, block, layers, num_classes=1000):
    self.inplanes = 64
    super(ResNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                           bias=False)
    self.bn1 = nn.BatchNorm2d(64)
    # self.relu = nn.ReLU6(inplace=True)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0])
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
    self.avgpool = nn.AvgPool2d(7, stride=1)
    self.fc = nn.Linear(512 * block.expansion, num_classes)

    self.set_params()
    # for m in self.modules():
    #     if isinstance(m, nn.Conv2d):
    #         n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
    #         m.weight.data.normal_(0, math.sqrt(2. / n))
    #     elif isinstance(m, nn.BatchNorm2d):
    #         m.weight.data.fill_(1)
    #         m.bias.data.zero_()
Author: aliyun, Project: alibabacloud-quantization-networks, Lines: 27, Source: resnet.py

Example 8: compute_madd

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print("[MAdd]: {} is not supported!".format(type(module).__name__))
        return 0 
Author: StevenGrove, Project: TreeFilter-Torch, Lines: 24, Source: compute_madd.py

Example 9: compute_flops

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out), 'Conv2d'
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out), 'BatchNorm2d'
    elif isinstance(module, (
            nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
            nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_flops(module, inp, out), 'Pool2d'
    elif isinstance(module,
                    (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU,
                     nn.Sigmoid)):
        return compute_ReLU_flops(module, inp, out), 'Activation'
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out), 'Upsample'
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out), 'Linear'
    else:
        print("[Flops]: {} is not supported!".format(type(module).__name__))
        return 0, -1
Author: StevenGrove, Project: TreeFilter-Torch, Lines: 23, Source: compute_flops.py

Example 10: compute_memory

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_memory(module, inp, out):
    if isinstance(module, (nn.ReLU, nn.ReLU6, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_memory(module, inp, out)
    elif isinstance(module, nn.PReLU):
        return compute_PReLU_memory(module, inp, out)
    elif isinstance(module, nn.Conv2d):
        return compute_Conv2d_memory(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_memory(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_memory(module, inp, out)
    elif isinstance(module, (
            nn.AvgPool2d, nn.MaxPool2d, nn.AdaptiveAvgPool2d,
            nn.AdaptiveMaxPool2d)):
        return compute_Pool2d_memory(module, inp, out)
    else:
        print("[Memory]: {} is not supported!".format(type(module).__name__))
        return 0, 0
Author: StevenGrove, Project: TreeFilter-Torch, Lines: 21, Source: compute_memory.py

Example 11: get_activation

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def get_activation(name):
    if isinstance(name, nn.Module):
        return name
    if name == 'default':
        return get_activation(get_default_activation())
    elif name == 'relu':
        return nn.ReLU(inplace=True)
    elif name == 'relu6':
        return nn.ReLU6(inplace=True)
    elif name == 'leaky_relu':
        return nn.LeakyReLU(negative_slope=0.1, inplace=True)
    elif name == 'sigmoid':
        return nn.Sigmoid()
    elif name == 'hswish':
        return HardSwish(inplace=True)
    elif name == 'swish':
        return Swish()
    else:
        raise NotImplementedError("No activation named %s" % name) 
Author: qixuxiang, Project: Pytorch_Lightweight_Network, Lines: 21, Source: modules.py
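
For example, resolving the 'relu6' name only touches the nn.ReLU6 branch, so it works even without the HardSwish/Swish helpers that the other branches assume:

act = get_activation('relu6')
print(act)  # ReLU6(inplace=True)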

Example 12: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, in_channels, out_channels, expansion=1, kernel_size=3,
             stride=1, padding=1, residual_block=None):
    expanded = in_channels * expansion
    super(ExpandedConv2d, self).__init__()
    self.add_res = stride == 1 and in_channels == out_channels
    self.residual_block = residual_block
    if expanded == in_channels:
        block = []
    else:
        block = [
            nn.Conv2d(in_channels, expanded, 1, bias=False),
            nn.BatchNorm2d(expanded),
            nn.ReLU6(inplace=True),
        ]

    block += [
        nn.Conv2d(expanded, expanded, kernel_size,
                  stride=stride, padding=padding, groups=expanded, bias=False),
        nn.BatchNorm2d(expanded),
        nn.ReLU6(inplace=True),
        nn.Conv2d(expanded, out_channels, 1, bias=False),
        nn.BatchNorm2d(out_channels)
    ]

    self.block = nn.Sequential(*block)
Author: eladhoffer, Project: convNet.pytorch, Lines: 27, Source: mobilenet_v2.py

Example 13: __init__

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def __init__(self, act_type, auto_optimize=True, **kwargs):
    super(Activation, self).__init__()
    if act_type == 'relu':
        self.act = nn.ReLU(
            inplace=True) if auto_optimize else nn.ReLU(**kwargs)
    elif act_type == 'relu6':
        self.act = nn.ReLU6(
            inplace=True) if auto_optimize else nn.ReLU6(**kwargs)
    elif act_type == 'h_swish':
        self.act = HardSwish(
            inplace=True) if auto_optimize else HardSwish(**kwargs)
    elif act_type == 'h_sigmoid':
        self.act = HardSigmoid(
            inplace=True) if auto_optimize else HardSigmoid(**kwargs)
    elif act_type == 'swish':
        self.act = Swish(**kwargs)
    elif act_type == 'sigmoid':
        self.act = nn.Sigmoid()
    elif act_type == 'lrelu':
        self.act = nn.LeakyReLU(inplace=True, **kwargs) if auto_optimize \
            else nn.LeakyReLU(**kwargs)
    elif act_type == 'prelu':
        self.act = nn.PReLU(**kwargs)
    else:
        raise NotImplementedError(
            '{} activation is not implemented.'.format(act_type))
Author: PistonY, Project: torch-toolbox, Lines: 28, Source: activation.py

Example 14: compute_flops

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_flops(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_flops(module, inp, out) // 2
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.AvgPool2d, nn.MaxPool2d)):
        return compute_Pool2d_flops(module, inp, out) // 2
    elif isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU)):
        return compute_ReLU_flops(module, inp, out) // 2
    elif isinstance(module, nn.Upsample):
        return compute_Upsample_flops(module, inp, out) // 2
    elif isinstance(module, nn.Linear):
        return compute_Linear_flops(module, inp, out) // 2
    else:
        return 0 
Author: Tramac, Project: torchscope, Lines: 17, Source: helper.py

Example 15: compute_ReLU_flops

# Required module import: from torch import nn [as alias]
# Or: from torch.nn import ReLU6 [as alias]
def compute_ReLU_flops(module, inp, out):
    assert isinstance(module, (nn.ReLU, nn.ReLU6, nn.PReLU, nn.ELU, nn.LeakyReLU))
    batch_size = inp.size()[0]
    active_elements_count = batch_size

    for s in inp.size()[1:]:
        active_elements_count *= s

    return active_elements_count 
Author: Tramac, Project: torchscope, Lines: 11, Source: helper.py
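
As a worked check, assuming compute_ReLU_flops from Example 15 is in scope: for a batch of one 32-channel 112x112 feature map, the count is 1 * 32 * 112 * 112 = 401,408 active elements.

import torch
import torch.nn as nn

relu6 = nn.ReLU6()
inp = torch.randn(1, 32, 112, 112)
print(compute_ReLU_flops(relu6, inp, relu6(inp)))  # 401408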


Note: the torch.nn.ReLU6 examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce this page without permission.