

Python utils._triple method code examples

This article collects typical usage examples of the Python method torch.nn.modules.utils._triple. If you are asking what utils._triple does, how to use it, or where to find concrete examples, the curated code samples below may help. You can also explore further usage examples from torch.nn.modules.utils, the module this method belongs to.


Below are 10 code examples of the utils._triple method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
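
Before the examples, a quick note on what _triple does: it normalizes a scalar or an iterable into a 3-tuple, so that kernel sizes, strides, paddings, and dilations for 3D operations can be given either as a single int or as per-axis values. A minimal demonstration:

from torch.nn.modules.utils import _triple

print(_triple(3))          # (3, 3, 3)
print(_triple((1, 2, 2)))  # (1, 2, 2) -- iterables pass through as a tuple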

Example 1: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=stride,
                         padding=padding, dilation=dilation, groups=groups, bias=bias)

        # If ints are given, convert them to an iterable, 1 -> [1, 1, 1].
        padding = _triple(padding)
        kernel_size = _triple(kernel_size)
        self.stride = _triple(stride)
        self.dilation = _triple(dilation)
        self.channel_shape = (out_channels, in_channels // groups)

        self.paddings = [
            (0, padding[1], padding[2]),
            (padding[0], 0, padding[2]),
            (padding[0], padding[1], 0),
        ]

        self.kernel_sizes = [
            (1, kernel_size[1], kernel_size[2]),
            (kernel_size[0], 1, kernel_size[2]),
            (kernel_size[0], kernel_size[1], 1),
        ]
        self.linear = nn.Linear(3, 1) 
Developer: alexandonian, Project: pretorched-x, Lines: 26, Source: multiview.py
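
To make the three-view construction above concrete, here is what the lists contain for a cubic kernel (a small sketch, not part of the original source): with kernel_size=3 and padding=1, each branch flattens one axis, so the branches behave like 2D convolutions over the (H, W), (T, W), and (T, H) planes, and nn.Linear(3, 1) presumably learns to blend the three views.

from torch.nn.modules.utils import _triple

kernel_size = _triple(3)  # (3, 3, 3)
padding = _triple(1)      # (1, 1, 1)
kernel_sizes = [(1, 3, 3), (3, 1, 3), (3, 3, 1)]  # (H,W), (T,W), (T,H) views
paddings = [(0, 1, 1), (1, 0, 1), (1, 1, 0)]      # no padding on the flattened axis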

Example 2: consistent_padding_with_dilation

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def consistent_padding_with_dilation(padding, dilation, dim=2):
    assert dim in (2, 3), 'Convolution layer only supports 2D and 3D'
    if dim == 2:
        padding = _pair(padding)
        dilation = _pair(dilation)
    else:  # dim == 3
        padding = _triple(padding)
        dilation = _triple(dilation)

    padding = list(padding)
    for d in range(dim):
        padding[d] = dilation[d] if dilation[d] > 1 else padding[d]
    padding = tuple(padding)

    return padding, dilation 
Developer: DeepMotionAIResearch, Project: DenseMatchingBenchmark, Lines: 17, Source: basic_layers.py
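
A quick sanity check of the function above (assuming _pair is imported alongside _triple): when dilation is greater than 1, the padding on that axis is overridden by the dilation value, which matches "same"-size padding for a kernel of size 3.

padding, dilation = consistent_padding_with_dilation(1, 2, dim=2)
print(padding, dilation)  # (2, 2) (2, 2)

padding, dilation = consistent_padding_with_dilation(1, 1, dim=3)
print(padding, dilation)  # (1, 1, 1) (1, 1, 1)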

Example 3: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from section 3.5 of the paper
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # the spatial conv is effectively a 2D conv due to the
        # spatial_kernel_size, followed by batch norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                      stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()  # alternatives: nn.Tanh() or nn.ReLU(inplace=True)


        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identical to a standard Conv3D, so it can be reused easily in any 
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
Developer: ZitongYu, Project: STVEN_rPPGNet, Lines: 42, Source: STVEN.py
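
To see the intermediary-channel formula in action, here is a worked computation with my own illustrative numbers, matching a typical R(2+1)D first layer: kernel_size=(3, 7, 7), in_channels=3, out_channels=64.

import math

kernel_size, in_channels, out_channels = (3, 7, 7), 3, 64
numer = kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels  # 28224
denom = kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels  # 339
intermed_channels = int(math.floor(numer / denom))  # 83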

Example 4: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super().__init__()

        # If ints are given, convert them to an iterable, 1 -> [1, 1, 1].
        stride = _triple(stride)
        padding = _triple(padding)
        kernel_size = _triple(kernel_size)

        # Decompose the parameters into spatial and temporal components
        # by masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid aberrant
        # behavior such as padding being added twice.
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]

        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]
        temporal_kernel_size = [kernel_size[0], 1, 1]

        # Compute the number of intermediary channels (M) using formula
        # from the paper section 3.5:
        intermed_channels = int(math.floor((kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
                                           (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # The spatial conv is effectively a 2D conv due to the
        # spatial_kernel_size, followed by batch_norm and ReLU.
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                      stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()

        # The temporal conv is effectively a 1D conv, but has batch norm
        # and ReLU added inside the model constructor, not here. This is an
        # intentional design choice, to allow this module to externally act
        # identically to a standard Conv3D, so it can be reused easily in any other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size,
                                       stride=temporal_stride, padding=temporal_padding, bias=bias) 
Developer: alexandonian, Project: pretorched-x, Lines: 40, Source: r2plus1d.py
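
The snippet above omits the module's forward method; based on the submodules it defines, the data flow would be spatial conv → batch norm → ReLU → temporal conv. A hypothetical usage sketch under that assumption (and assuming the class is named SpatioTemporalConv, as in the sibling examples):

import torch

conv = SpatioTemporalConv(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), padding=(1, 3, 3))
x = torch.randn(1, 3, 16, 112, 112)  # (N, C, T, H, W)
y = conv.temporal_conv(conv.relu(conv.bn(conv.spatial_conv(x))))
print(y.shape)  # torch.Size([1, 64, 16, 56, 56])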

Example 5: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from section 3.5 of the paper
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))

        # the spatial conv is effectively a 2D conv due to the 
        # spatial_kernel_size, followed by batch_norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                    stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()

        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identical to a standard Conv3D, so it can be reused easily in any 
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
Developer: irhum, Project: R2Plus1D-PyTorch, Lines: 41, Source: module.py
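
A brief note on why M is defined this way: it approximately matches the parameter count of the factored (2+1)D block to that of the full 3D convolution it replaces, so the decomposition adds an extra nonlinearity without adding capacity. A small check with illustrative numbers (not from the original source):

full_3d = 3 * 7 * 7 * 3 * 64                       # 28224 weights in a plain Conv3d
m = (3 * 7 * 7 * 3 * 64) // (7 * 7 * 3 + 3 * 64)   # M = 83
factored = 7 * 7 * 3 * m + 3 * m * 64              # 28137 weights in the (2+1)D block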

Example 6: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,
                 cuda=False, init_weight=None, init_bias=None, clip_var=None):
        kernel_size = utils._triple(kernel_size)
        stride = utils._triple(stride)
        padding = utils._triple(padding)
        dilation = utils._triple(dilation)

        super(Conv3dGroupNJ, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, utils._pair(0), groups, bias, init_weight, init_bias, cuda, clip_var) 
Developer: KarenUllrich, Project: Tutorial_BayesianCompressionForDL, Lines: 12, Source: BayesianLayers.py

Example 7: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)


        self.temporal_spatial_conv = nn.Conv3d(in_channels, out_channels, kernel_size,
                                    stride=stride, padding=padding, bias=bias)
        self.bn = nn.BatchNorm3d(out_channels)
        self.relu = nn.ReLU() 
Developer: jfzhang95, Project: pytorch-video-recognition, Lines: 15, Source: R3D_model.py
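
Unlike the factored variants above, this version keeps a plain full-3D convolution followed by batch norm and ReLU. The forward method is again not shown; a hypothetical usage sketch, chaining the submodules in that order:

import torch

conv = SpatioTemporalConv(3, 64, kernel_size=3, padding=1)
x = torch.randn(2, 3, 8, 56, 56)  # (N, C, T, H, W)
y = conv.relu(conv.bn(conv.temporal_spatial_conv(x)))
print(y.shape)  # torch.Size([2, 64, 8, 56, 56])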

Example 8: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 num_deformable_groups=1,
                 im2col_step=64,
                 bias=True):
        super(TrajConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _triple(kernel_size)
        self.stride = _triple(stride)
        self.padding = _triple(padding)
        self.dilation = _triple(dilation)
        self.num_deformable_groups = num_deformable_groups
        self.im2col_step = im2col_step

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(
                torch.Tensor(out_channels,))
        else:
            self.bias = nn.Parameter(
                torch.zeros(0,))

        self.reset_parameters() 
Developer: open-mmlab, Project: mmaction, Lines: 32, Source: traj_conv.py
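
The weight shape follows directly from the constructor: (out_channels, in_channels, *kernel_size), with every triple-valued argument normalized by _triple. A hypothetical instantiation, assuming reset_parameters (not shown in this snippet) initializes the tensors in the usual way:

conv = TrajConv(16, 32, kernel_size=3, padding=1)
print(conv.weight.shape)  # torch.Size([32, 16, 3, 3, 3])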

Example 9: __init__

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True):
        super(SpatioTemporalConv, self).__init__()

        # if ints are entered, convert them to iterables, 1 -> [1, 1, 1]
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)

        # decomposing the parameters into spatial and temporal components by
        # masking out the values with the defaults on the axis that
        # won't be convolved over. This is necessary to avoid unintentional
        # behavior such as padding being added twice
        spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
        spatial_stride = [1, stride[1], stride[2]]
        spatial_padding = [0, padding[1], padding[2]]

        temporal_kernel_size = [kernel_size[0], 1, 1]
        temporal_stride = [stride[0], 1, 1]
        temporal_padding = [padding[0], 0, 0]

        # compute the number of intermediary channels (M) using the formula
        # from section 3.5 of the paper
        intermed_channels = int(math.floor(
            (kernel_size[0] * kernel_size[1] * kernel_size[2] * in_channels * out_channels) /
            (kernel_size[1] * kernel_size[2] * in_channels + kernel_size[0] * out_channels)))
        
        # self-definition
        #intermed_channels = int((in_channels+intermed_channels)/2)

        # the spatial conv is effectively a 2D conv due to the 
        # spatial_kernel_size, followed by batch_norm and ReLU
        self.spatial_conv = nn.Conv3d(in_channels, intermed_channels, spatial_kernel_size,
                                    stride=spatial_stride, padding=spatial_padding, bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()  # alternatives: nn.Tanh() or nn.ReLU(inplace=True)


        # the temporal conv is effectively a 1D conv, but has batch norm 
        # and ReLU added inside the model constructor, not here. This is an 
        # intentional design choice, to allow this module to externally act 
        # identical to a standard Conv3D, so it can be reused easily in any 
        # other codebase
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, temporal_kernel_size, 
                                    stride=temporal_stride, padding=temporal_padding, bias=bias) 
Developer: ZitongYu, Project: STVEN_rPPGNet, Lines: 45, Source: rPPGNet.py

Example 10: forward

# Required import: from torch.nn.modules import utils [as alias]
# Or: from torch.nn.modules.utils import _triple [as alias]
def forward(ctx,
                input,
                offset,
                weight,
                bias,
                stride=1,
                padding=0,
                dilation=1,
                deformable_groups=1,
                im2col_step=64):
        if input is not None and input.dim() != 5:
            raise ValueError(
                "Expected 5D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        ctx.stride = _triple(stride)
        ctx.padding = _triple(padding)
        ctx.dilation = _triple(dilation)
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight, bias)

        output = input.new(*TrajConvFunction._output_size(
            input, weight, ctx.padding, ctx.dilation, ctx.stride))

        ctx.bufs_ = [input.new(), input.new()]  # columns, ones

        if not input.is_cuda:
            raise NotImplementedError
        else:
            if isinstance(input, torch.autograd.Variable):
                if not isinstance(input.data, (torch.cuda.FloatTensor, torch.cuda.DoubleTensor)):
                    raise NotImplementedError
            else:
                if not isinstance(input, (torch.cuda.FloatTensor, torch.cuda.DoubleTensor)):
                    raise NotImplementedError

            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            traj_conv_cuda.deform_3d_conv_forward_cuda(
                input, weight, bias, offset, output, ctx.bufs_[0], ctx.bufs_[1],
                weight.size(2), weight.size(3), weight.size(4),
                ctx.stride[0], ctx.stride[1], ctx.stride[2],
                ctx.padding[0], ctx.padding[1], ctx.padding[2],
                ctx.dilation[0], ctx.dilation[1], ctx.dilation[2], ctx.deformable_groups, cur_im2col_step)
        return output 
Developer: open-mmlab, Project: mmaction, Lines: 49, Source: traj_conv.py
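
One detail worth noting in the forward pass above: im2col_step trades memory for throughput by processing the batch in chunks, and the effective chunk size must divide the batch size evenly, as the assert enforces. A small illustration of the arithmetic:

batch_size = 16
im2col_step = 64
cur_im2col_step = min(im2col_step, batch_size)  # 16
assert batch_size % cur_im2col_step == 0        # holds, so the CUDA kernel can run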


Note: The torch.nn.modules.utils._triple examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.