

Python conv.conv2d Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.nnet.conv.conv2d. If you are wondering how conv.conv2d is called, what its arguments look like, or simply want working examples, the hand-picked snippets below should help. You can also explore further usage examples from the containing module, theano.tensor.nnet.conv.


The 15 code examples of conv.conv2d below are sorted by popularity by default.
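
Before diving into the project snippets, a minimal, self-contained sketch of the call signature may be useful. It is not taken from any of the projects below; it assumes the legacy theano.tensor.nnet.conv module (deprecated in later Theano releases in favour of theano.tensor.nnet.conv2d), where inputs are 4-D (batch, channels, rows, columns), filters are 4-D (n_filters, channels, filter_rows, filter_columns), and only the 'valid' and 'full' border modes are available.

import numpy
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

X = T.tensor4('X')  # (batch, in_channels, height, width)
W = theano.shared(numpy.random.randn(8, 1, 3, 3).astype(theano.config.floatX), name='W')

# 'valid' shrinks each spatial dimension by filter_size - 1; 'full' grows it by the same amount.
out = conv.conv2d(X, W, filter_shape=(8, 1, 3, 3), border_mode='valid', subsample=(1, 1))

f = theano.function([X], out)
y = f(numpy.random.randn(2, 1, 28, 28).astype(theano.config.floatX))
print(y.shape)  # expected: (2, 8, 26, 26)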

Example 1: model

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))  # w_o: output-layer weights, defined at module level in the source script
    return l1, l2, l3, l4, pyx 
Developer: Newmu | Project: Theano-Tutorials | Lines: 21 | Source: 5_convolutional_net.py

Example 2: local_conv2d_cpu

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def local_conv2d_cpu(node):

    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval] 
Developer: muhanzhang | Project: D-VAE | Lines: 24 | Source: opt.py

Example 3: test_broadcast_grad

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius+1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5*filter_1d**2/sigma**2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma) 
Developer: muhanzhang | Project: D-VAE | Lines: 20 | Source: test_conv.py
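
The symbolic filter built in this test is just a normalized 1-D Gaussian reshaped so it broadcasts as a single-channel 4-D kernel; a rough NumPy equivalent (illustrative only, not part of the test suite) is:

import numpy

sigma, window_radius = 20.0, 3
filter_1d = numpy.arange(-window_radius, window_radius + 1, dtype='float32')
filter_1d = numpy.exp(-0.5 * filter_1d ** 2 / sigma ** 2)
filter_1d /= filter_1d.sum()

# dimshuffle(['x', 'x', 0, 'x']) in the test corresponds to shape (1, 1, 7, 1)
filter_W = filter_1d.reshape(1, 1, 2 * window_radius + 1, 1)
print(filter_W.shape)  # (1, 1, 7, 1)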

Example 4: output

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def output(self, input=None, dropout_active=True, *args, **kwargs):
        if input is None:
            input = self.input_layer.output(dropout_active=dropout_active, *args, **kwargs)

        if dropout_active and (self.dropout > 0.):
            retain_prob = 1 - self.dropout
            if self.dropout_tied:
                # tying of the dropout masks across the entire feature maps, so broadcast across the feature maps.
                mask = srng.binomial((input.shape[0], input.shape[1]), p=retain_prob, dtype='int32').astype('float32').dimshuffle(0, 1, 'x', 'x')
            else:
                mask = srng.binomial(input.shape, p=retain_prob, dtype='int32').astype('float32')
                # apply the input mask and rescale the input accordingly. By doing this it's no longer necessary to rescale the weights at test time.
            input = input / retain_prob * mask

        if self.border_mode in ['valid', 'full']:
            conved = conv2d(input, self.W, subsample=(1, 1), image_shape=self.input_shape, filter_shape=self.filter_shape, border_mode=self.border_mode)
        elif self.border_mode == 'same':
            conved = conv2d(input, self.W, subsample=(1, 1), image_shape=self.input_shape, filter_shape=self.filter_shape, border_mode='full')
            shift_x = (self.filter_width - 1) // 2
            shift_y = (self.filter_height - 1) // 2
            conved = conved[:, :, shift_x:self.input_shape[2] + shift_x, shift_y:self.input_shape[3] + shift_y]
        else:
            raise RuntimeError("Invalid border mode: '%s'" % self.border_mode)
        return self.nonlinearity(conved + self.b.dimshuffle('x', 0, 'x', 'x')) 
Developer: benanne | Project: kaggle-galaxies | Lines: 26 | Source: layers.py
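
The 'same' branch above uses a common workaround for the legacy API, which only offers 'valid' and 'full': convolve in 'full' mode and crop the centre back to the input size. A stripped-down sketch of just that step (the helper name and arguments are illustrative, not from the project):

from theano.tensor.nnet import conv

def conv2d_same(x, W, input_shape, filter_shape):
    # 'full' convolution followed by a centre crop; assumes odd filter sizes
    conved = conv.conv2d(x, W, border_mode='full',
                         image_shape=input_shape, filter_shape=filter_shape)
    shift_r = (filter_shape[2] - 1) // 2
    shift_c = (filter_shape[3] - 1) // 2
    return conved[:, :,
                  shift_r:input_shape[2] + shift_r,
                  shift_c:input_shape[3] + shift_c]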

Example 5: predict

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear=="tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        elif self.non_linear=="relu":
            conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Developer: SenticNet | Project: personality-detection | Lines: 18 | Source: conv_net_classes.py

Example 6: predict

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear=="tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        elif self.non_linear=="relu":
            conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Developer: UKPLab | Project: deeplearning4nlp-tutorial | Lines: 18 | Source: conv_net_classes.py

Example 7: lmul_T

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def lmul_T(self, x):
        """
        .. todo::

            WRITEME
        """
        # dot(x, A.T)
        dummy_v = tensor.tensor4()
        z_hs = conv2d(dummy_v, self._filters,
                image_shape=self._img_shape,
                filter_shape=self._filters_shape,
                subsample=self._subsample,
                border_mode=self._border_mode,
                )
        xfilters, xdummy = z_hs.owner.op.grad((dummy_v, self._filters), (x,))
        return xfilters 
Developer: zchengquan | Project: TextDetector | Lines: 18 | Source: conv2d.py

Example 8: forward

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def forward(self, inputs):
        # if padding is greater than zero, we insert the inputs into
        # the center of a larger zero array, effectively adding zero
        # borders
        if self.pad > 0:
            padded_inputs = T.set_subtensor(
                T.zeros((inputs.shape[0],
                         self.inputs_shape[1],
                         self.inputs_shape[2] + 2 * self.pad,
                         self.inputs_shape[3] + 2 * self.pad),
                        dtype=inputs.dtype)[:, :, self.pad:-self.pad, self.pad:-self.pad],
                inputs)
        else:
            padded_inputs = inputs
        padded_inputs_shape = (
            None,
            self.inputs_shape[1],
            self.inputs_shape[2] + 2 * self.pad,
            self.inputs_shape[3] + 2 * self.pad)

        return conv.conv2d(
            input=padded_inputs,
            filters=self.W,
            filter_shape=self.filter_shape,
            image_shape=padded_inputs_shape) 
Developer: saebrahimi | Project: RATM | Lines: 27 | Source: layers.py
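
A brief design note: the legacy conv.conv2d has no padding argument and only understands the 'valid' and 'full' border modes (compare the check in Example 2), so arbitrary zero padding has to be built by hand, as above, by writing the input into the centre of a larger zero tensor before the (default 'valid') convolution.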

Example 9: setUp

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def setUp(self):
        super(TestConv2D, self).setUp()
        self.input = T.tensor4('input', dtype=self.dtype)
        self.input.name = 'default_V'
        self.filters = T.tensor4('filters', dtype=self.dtype)
        self.filters.name = 'default_filters'
        if not conv.imported_scipy_signal and theano.config.cxx == "":
            raise SkipTest("conv2d tests need SciPy or a c++ compiler") 
Developer: muhanzhang | Project: D-VAE | Lines: 10 | Source: test_conv.py

Example 10: speed

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def speed(self):
        n_calls = 20000
        print("n_calls", n_calls)
        for border_mode in ['valid', 'full']:
            print()
            print(border_mode)
            for openmp in [False, True]:
                print("OpenMP", openmp)
                image_shapes = [(1, 5, 6, 6),
                                (10, 5, 6, 6),
                                #(10, 10, 16, 16),
                                #(10, 10, 32, 32)
                ]
                print("image_shape", image_shapes)
                for image_shape in image_shapes:
                    filter_shapes = [(1, 5, 4, 4), (2, 5, 4, 4), (5, 5, 4, 4)]
                    print("filter_shapes", filter_shapes)
                    for filter_shape in filter_shapes:

                        input = theano.shared(numpy.random.random(image_shape))
                        filters = theano.shared(numpy.random.random(filter_shape))

                        output = self.conv2d(input, filters,
                                             image_shape, filter_shape,
                                             border_mode,
                                             unroll_patch=True,
                                             openmp=openmp)
                        mode = theano.Mode(linker=theano.gof.vm.VM_Linker(
                            allow_gc=False,
                            use_cloop=True))
                        theano_conv = theano.function([], output, mode=mode)
                        t1 = time.time()
                        theano_conv.fn(n_calls=n_calls)
                        t2 = time.time()
                        print(t2 - t1, end=' ')
                    print() 
Developer: muhanzhang | Project: D-VAE | Lines: 38 | Source: test_conv.py

Example 11: set_inpt

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        pooled_out = downsample.max_pool_2d(
            input=conv_out, ds=self.poolsize, ignore_border=True)
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers 
Developer: dalmia | Project: WannaPark | Lines: 12 | Source: network3.py

Example 12: conv

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def conv(X, w, b=None):
    # z = dnn_conv(X, w, border_mode=int(np.floor(w.get_value().shape[-1]/2.)))
    s = int(np.floor(w.get_value().shape[-1]/2.))
    z = conv2d(X, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z 
Developer: Ivaylo-Popov | Project: Theano-Lights | Lines: 9 | Source: toolbox.py

Example 13: deconv

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def deconv(X, w, b=None):
    # z = dnn_conv(X, w, direction_hint="*not* 'forward!", border_mode=int(np.floor(w.get_value().shape[-1]/2.)))
    s = int(np.floor(w.get_value().shape[-1]/2.))
    z = conv2d(X, w, border_mode='full')[:, :, s:-s, s:-s]
    if b is not None:
        z += b.dimshuffle('x', 0, 'x', 'x')
    return z 
Developer: Ivaylo-Popov | Project: Theano-Lights | Lines: 9 | Source: toolbox.py
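
Note that conv and deconv in this toolbox reduce to the same 'full'-convolution-plus-centre-crop pattern. Cropping s = floor(k/2) pixels from each side returns an output the same size as the input only when the filter width k is odd: a full convolution produces i + k - 1 pixels, and i + k - 1 - 2*floor(k/2) equals i only for odd k (for even k the result is one pixel shorter along each spatial axis).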

Example 14: encoder

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def encoder(tparams, layer0_input, filter_shape, pool_size,
                      prefix='cnn_encoder'):
    
    """ filter_shape: (number of filters, num input feature maps, filter height,
                        filter width)
        image_shape: (batch_size, num input feature maps, image height, image width)
    """
    
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix,'W')], 
                            filter_shape=filter_shape)
    
    conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix,'b')].dimshuffle('x', 0, 'x', 'x'))
    output = pool.pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=True)

    return output.flatten(2) 
Developer: zhegan27 | Project: sentence_classification | Lines: 17 | Source: cnn_layers.py

Example 15: f_conv

# Required module import: from theano.tensor.nnet import conv [as alias]
# Or: from theano.tensor.nnet.conv import conv2d [as alias]
def f_conv(self, x, spec, in_dim, weight_name):
        layer_type, dims = spec
        num_filters = dims[0]
        filter_size = (dims[1], dims[1])
        stride = (dims[2], dims[2])

        bm = 'full' if 'convf' in layer_type else 'valid'

        num_channels = in_dim[0]

        W = self.weight(self.rand_init_conv(
            (num_filters, num_channels) + filter_size), weight_name)

        if stride != (1, 1):
            f = GpuCorrMM(subsample=stride, border_mode=bm, pad=(0, 0))
            y = f(gpu_contiguous(x), gpu_contiguous(W))
        else:
            assert self.p.batch_size == self.p.valid_batch_size
            y = conv2d(x, W, image_shape=(2*self.p.batch_size, ) + in_dim,
                       filter_shape=((num_filters, num_channels) +
                                     filter_size), border_mode=bm)
        output_size = ((num_filters,) +
                       ConvOp.getOutputShape(in_dim[1:], filter_size,
                                             stride, bm))

        return y, output_size 
Developer: CuriousAI | Project: ladder | Lines: 28 | Source: ladder.py
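
ConvOp.getOutputShape in the last lines computes the spatial output size from the image size, filter size, stride, and border mode. A rough pure-Python sketch of that arithmetic for the two legacy modes (a simplification, assuming subsampling is applied to the convolution result):

def conv_output_length(image_size, filter_size, stride, border_mode):
    # spatial output length for the legacy 'valid'/'full' border modes
    if border_mode == 'valid':
        conv_size = image_size - filter_size + 1
    elif border_mode == 'full':
        conv_size = image_size + filter_size - 1
    else:
        raise ValueError("legacy conv2d only supports 'valid' and 'full'")
    return (conv_size - 1) // stride + 1  # equivalent to ceil(conv_size / stride)

print(conv_output_length(6, 4, 1, 'valid'))  # 3
print(conv_output_length(6, 4, 1, 'full'))   # 9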


Note: The theano.tensor.nnet.conv.conv2d examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, who retain the copyright; consult each project's license before redistributing or using the code, and do not republish without permission.