当前位置: 首页>>代码示例>>Python>>正文


Python dnn.dnn_conv3d方法代码示例

本文整理汇总了Python中theano.sandbox.cuda.dnn.dnn_conv3d方法的典型用法代码示例。如果您正苦于以下问题:Python dnn.dnn_conv3d方法的具体用法?Python dnn.dnn_conv3d怎么用?Python dnn.dnn_conv3d使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在theano.sandbox.cuda.dnn的用法示例。


在下文中一共展示了dnn.dnn_conv3d方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get_output_for

# 需要导入模块: from theano.sandbox.cuda import dnn [as 别名]
# 或者: from theano.sandbox.cuda.dnn import dnn_conv3d [as 别名]
def get_output_for(self, input, *args, **kwargs):
    """Apply the layer's (possibly fractionally strided) 3D convolution.

    If any stride is < 1, the layer acts as a transposed convolution:
    it is expressed as the gradient of a forward cuDNN conv3d from a
    dummy tensor of the target (upsampled) shape. Otherwise a plain
    cuDNN conv3d is used. The per-filter bias (if any) is broadcast
    over batch and the three spatial axes before the nonlinearity.
    """
    # cuDNN distinguishes true convolution (flipped filters) from
    # cross-correlation.
    conv_mode = 'conv' if self.flip_filters else 'cross'

    if any(s < 1.0 for s in self.strides):
        # Fractional strides -> transposed convolution. The integer
        # upsampling factor is the reciprocal of each stride.
        upsample = tuple(int(1.0 / s) for s in self.strides)

        # Dummy tensor with the target output shape; a None batch
        # dimension is filled in from the runtime input.
        target_shape = list(self.output_shape)
        if target_shape[0] is None:
            target_shape[0] = input.shape[0]
        dummy = T.alloc(0., *target_shape)

        # Forward conv from the hi-res dummy down to low-res space...
        forward = dnn.dnn_conv3d(img=dummy,
                                 kerns=self.W.transpose(1, 0, 2, 3, 4),
                                 subsample=upsample,
                                 border_mode=self.pad,
                                 conv_mode=conv_mode)
        # ...then its gradient w.r.t. the dummy, seeded with `input`
        # via known_grads, is exactly the transposed convolution.
        conved = T.grad(forward.sum(), wrt=dummy,
                        known_grads={forward: input})
    else:
        # Ordinary (integer-)strided 3D convolution via cuDNN.
        conved = dnn.dnn_conv3d(img=input,
                                kerns=self.W,
                                subsample=self.strides,
                                border_mode=self.pad,
                                conv_mode=conv_mode)

    if self.b is None:
        activation = conved
    else:
        # Broadcast bias over batch ('x') and the 3 spatial dims.
        activation = conved + self.b.dimshuffle('x', 0, 'x', 'x', 'x')

    return self.nonlinearity(activation)

        
# Repeat upscale 3d layer 
开发者ID:ajbrock,项目名称:Generative-and-Discriminative-Voxel-Modeling,代码行数:41,代码来源:layers.py

示例2: compute_output

# 需要导入模块: from theano.sandbox.cuda import dnn [as 别名]
# 或者: from theano.sandbox.cuda.dnn import dnn_conv3d [as 别名]
def compute_output(self, network, in_vw):
    """Build the output of a cuDNN 3D convolution node.

    Reads hyperparameters (num_filters, filter_size, stride, pad,
    conv_mode), creates the shared weight tensor, runs dnn_conv3d on
    the input variable, and registers the result (with its inferred
    static shape) as this node's "default" output.
    """
    # ---- hyperparameters -------------------------------------------
    num_filters = network.find_hyperparameter(["num_filters"])
    filter_size = network.find_hyperparameter(["filter_size"])
    stride = network.find_hyperparameter(["conv_stride", "stride"],
                                         (1, 1, 1))
    pad = conv.conv_parse_pad(
        filter_size,
        network.find_hyperparameter(["conv_pad", "pad"], (0, 0, 0)))
    # Default to true convolution (not cross-correlation) so results
    # are compatible with the standard (non-cuDNN) conv2d.
    conv_mode = network.find_hyperparameter(["conv_mode"], "conv")
    assert len(filter_size) == 3
    assert conv_mode in ["conv", "cross"]

    # ---- shared weight ---------------------------------------------
    in_channels = in_vw.shape[1]
    W = network.create_vw(
        name="weight",
        is_shared=True,
        shape=(num_filters, in_channels) + tuple(filter_size),
        tags={"parameter", "weight"},
        default_inits=[],
    ).variable

    # ---- convolution -----------------------------------------------
    out_var = dnn.dnn_conv3d(img=in_vw.variable,
                             kerns=W,
                             border_mode=pad,
                             subsample=stride,
                             conv_mode=conv_mode)

    # Static output shape over the three spatial axes.
    out_shape = conv.conv_output_shape(input_shape=in_vw.shape,
                                       num_filters=num_filters,
                                       axes=(2, 3, 4),
                                       conv_shape=filter_size,
                                       strides=stride,
                                       pads=pad)

    network.create_vw(
        "default",
        variable=out_var,
        shape=out_shape,
        tags={"output"},
    )
开发者ID:SBU-BMI,项目名称:u24_lymphocyte,代码行数:44,代码来源:dnn.py

示例3: compute_output

# 需要导入模块: from theano.sandbox.cuda import dnn [as 别名]
# 或者: from theano.sandbox.cuda.dnn import dnn_conv3d [as 别名]
def compute_output(self, network, in_vw):
    """Upsample `in_vw` by `upsample_factor` via a transposed 3D conv.

    The transposed convolution is realized as the gradient of a
    'same'-padded forward dnn_conv3d from a zero hi-res tensor down to
    the low-res space, with the incoming variable supplied as the known
    gradient of that forward conv.
    """
    num_filters = network.find_hyperparameter(['num_filters'])
    stride = network.find_hyperparameter(['upsample_factor'])
    filter_size = network.find_hyperparameter(['filter_size'])
    pad = treeano.nodes.conv.conv_parse_pad(filter_size, 'same')

    # W is a convolution from hi-res to low-res, so axis 0 holds the
    # low-res (input) channel count and axis 1 the hi-res (output,
    # i.e. num_filters) channel count.
    W = network.create_vw(
        name='weight',
        is_shared=True,
        shape=(in_vw.shape[1], num_filters,) + filter_size,
        tags={'parameter', 'weight'},
        default_inits=[],
    ).variable

    # Upsampled shapes: channels become num_filters; each spatial axis
    # is scaled by its stride (static dims only when known).
    out_shape = list(in_vw.shape)
    out_shape[1] = num_filters
    symbolic_shape = list(in_vw.symbolic_shape())
    symbolic_shape[1] = num_filters
    for axis, factor in zip((2, 3, 4), stride):
        if out_shape[axis] is not None:
            out_shape[axis] *= factor
        symbolic_shape[axis] *= factor

    # Gradient trick: forward-convolve a zero hi-res tensor, then take
    # d(forward)/d(hi_res) with in_vw as the seed gradient — that
    # gradient IS the transposed convolution of in_vw.
    hi_res = T.zeros(tuple(symbolic_shape))
    forward = dnn.dnn_conv3d(img=hi_res,
                             kerns=W,
                             border_mode=pad,
                             subsample=stride)
    out_var = T.grad(None, wrt=hi_res,
                     known_grads={forward: in_vw.variable})

    network.create_vw(
        'default',
        variable=out_var,
        shape=tuple(out_shape),
        tags={'output'}
    )
开发者ID:SBU-BMI,项目名称:u24_lymphocyte,代码行数:46,代码来源:deconv_upsample.py


注:本文中的theano.sandbox.cuda.dnn.dnn_conv3d方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。