

Python conv.conv2d Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.nnet.conv.conv2d. If you are unsure what conv2d does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Below are 15 code examples of the conv2d function, listed roughly in order of popularity.
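Before diving into the examples, here is a minimal, self-contained sketch of the basic conv.conv2d call pattern that every example below builds on. It is not taken from any of the listed projects; the shapes and random filter values are arbitrary illustrations.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

X = T.tensor4('X')                  # (batch, channels, rows, cols)
w_shp = (2, 1, 3, 3)                # (n_filters, n_channels, filter_rows, filter_cols)
W = theano.shared(np.random.randn(*w_shp).astype(theano.config.floatX), name='W')

out = conv.conv2d(X, W, filter_shape=w_shp, border_mode='valid')
f = theano.function([X], out)

x = np.random.randn(4, 1, 8, 8).astype(theano.config.floatX)
print(f(x).shape)                   # (4, 2, 6, 6) with a 'valid' convolution

Most of the examples below add a bias via dimshuffle('x', 0, 'x', 'x'), a nonlinearity, and max pooling on top of this basic call.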

Example 1: model

def model(X, filter_params, bias_params, p_dropout, srng):
    inp = X
    half = int(len(filter_params)/2)
    conv_params = filter_params[:half]
    deconv_params = filter_params[half:]
    conv_biases = bias_params[:half]
    deconv_biases = bias_params[half:]
    for f, b in zip(conv_params, conv_biases):
        outa = rectify(conv2d(inp, f, border_mode='valid') +
                       b.dimshuffle('x', 0, 'x', 'x'))
        outb = dropout(outa, srng, p_dropout)
        inp = outb
    c = 0
    for f, b in zip(deconv_params, deconv_biases):
        # use a sigmoid on the final deconvolution layer, ReLU on the rest
        if c == len(deconv_params) - 1:
            outa = T.nnet.sigmoid(conv2d(inp, f, border_mode='full') +
                                  b.dimshuffle('x', 0, 'x', 'x'))
        else:
            outa = rectify(conv2d(inp, f, border_mode='full') +
                           b.dimshuffle('x', 0, 'x', 'x'))
        outb = dropout(outa, srng, p_dropout)
        inp = outb
        c += 1
    output = inp
    return output
Author: MaGold, Project: Caustics, Lines: 25, Source: model.py

Example 2: model

	def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
		
		# print X
		l1a = self.rectify(conv2d(X, w1, border_mode = "full"))
		l1 = max_pool_2d(l1a, (2, 2))
		l1 = self.dropout(l1, p_drop_conv)
		# print np.mean(l1)
		
		l2a = self.rectify(conv2d(l1, w2))
		l2 = max_pool_2d(l2a, (2, 2))
		l2 = self.dropout(l2, p_drop_conv)
		# print np.mean(l2)

		l3a = self.rectify(conv2d(l2, w3))
		l3b = max_pool_2d(l3a, (2, 2))
		l3 = T.flatten(l3b, outdim = 2)
		l3 = self.dropout(l3, p_drop_conv)
		# print np.mean(l3)
		
		l4 = self.rectify(T.dot(l3, w4))
		l4 = self.dropout(l4, p_drop_hidden)
		# print np.mean(l4)
		# l4 = T.dot(l4, wo)
		sig = T.dot(l4, wo)
		# pyx = self.softmax(T.dot(l4, wo))
		return l1, l2, l3, l4, sig
Author: ZhecanJamesWang, Project: conv-net-research, Lines: 26, Source: convnet_James_testing_parameters.py

Example 3: get_theano_function

 def get_theano_function(self, input):
     print self.name
     print 'in',self.in_size
     print 'filt',self.theano_filter_shape
     print 'bias', self.bias.shape
     conv_out = None
     if self.pad[0] != 0:
         tmp_new = T.zeros_like(self.dpad)
         input = T.set_subtensor(tmp_new[:,:,self.pad[0]:(-self.pad[0]), 
                                         self.pad[3]:(-self.pad[3])], input)
     if self.groups == 1:
         conv_out = conv.conv2d(input, self.w, 
                                filter_shape=self.theano_filter_shape,
                                subsample=self.stride, 
                                image_shape=self.theano_in_size)
     else:
         in_1 = input[:,:input.shape[1]/2,:,:]
         in_2 = input[:,input.shape[1]/2:,:,:]
         fs = self.theano_filter_shape
         fs = (fs[0]/2,fs[1],fs[2],fs[3])
         conv_1 = conv.conv2d(in_1, self.w1, filter_shape=fs,
                              subsample=self.stride, 
                              image_shape=self.theano_in_size)
         conv_2 = conv.conv2d(in_2, self.w2, filter_shape=fs,
                              subsample=self.stride, 
                              image_shape=self.theano_in_size)
         conv_out = T.concatenate([conv_1, conv_2], axis=1)
     return conv_out + self.b.dimshuffle('x',0,'x','x')
Author: matteopresutto, Project: caffe-to-theano, Lines: 28, Source: layers.py

Example 4: model

def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):

    # conv + ReLU + pool
    # border_mode='full' zero-pads the input; the default 'valid' does no padding
    l1a = rectify(conv2d(X, w, border_mode='full'))
    # max-pool over 2x2 windows, keeping the largest value in each window
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    # conv + ReLU + pool
    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    # conv + ReLU + pool
    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    # flatten the 4-D pooled output to 2-D: all dimensions after the batch
    # dimension are collapsed so the result can feed the fully connected layer
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    # FC + ReLU
    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)


    # output layer + softmax
    pyx = softmax(T.dot(l4, w_o))

    return l1, l2, l3, l4, pyx
Author: coroner4817, Project: ForAWS, Lines: 31, Source: 5_convolutional_net.py
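As the comments in Example 4 note, border_mode='full' zero-pads the input while the default 'valid' does not. Below is a standalone sketch, independent of the quoted project, showing how the two modes affect the output shape.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

X = T.tensor4('X')
W = theano.shared(np.ones((1, 1, 5, 5), dtype=theano.config.floatX), name='W')

f_valid = theano.function([X], conv.conv2d(X, W, border_mode='valid'))
f_full = theano.function([X], conv.conv2d(X, W, border_mode='full'))

x = np.ones((1, 1, 28, 28), dtype=theano.config.floatX)
print(f_valid(x).shape)  # (1, 1, 24, 24): each spatial dim shrinks by kernel_size - 1
print(f_full(x).shape)   # (1, 1, 32, 32): each spatial dim grows by kernel_size - 1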

Example 5: decode

    def decode(self, hidden):
        hidden_ = T.alloc(0.,*self.hidden_shape)
        deconv_out = T.alloc(0.,*self.output_shape)
       
        # Zero-pad the left of the last axis by filter width - 1
        hidden_ = T.set_subtensor(hidden_[:,:,:,self.filter_shape[3]-1:],hidden)

        # Calculate output
        conv_odd = conv.conv2d(
            input = hidden_,
            filters = self.W_odd,
            filter_shape = self.filter_shape,
            image_shape = self.hidden_shape,)
        conv_even = conv.conv2d(
            input = hidden_,
            filters = self.W_even,
            filter_shape = self.filter_shape,
            image_shape = self.hidden_shape,)
        
        deconv_out = T.set_subtensor(deconv_out[:,:,:,::2], conv_odd)
        deconv_out = T.set_subtensor(deconv_out[:,:,:,1::2], conv_even)

        linout = deconv_out + self.b.dimshuffle('x',0,'x','x')
        
        if self.dec_hid == 'tanh':
            convout= T.tanh(linout)
        elif self.dec_hid == 'lin':
            convout=linout
        elif self.dec_hid == 'relu':
            convout=linout * (linout > 0.) + 0. * (linout < 0.)
        else:
            raise ValueError('Invalid dec_hid')
        #### Recurrent connection####
        return convout
Author: ktho22, Project: speech_synthesis, Lines: 34, Source: rtdnn_bc01.py

Example 6: testmodel

def testmodel(X, w, w2, w3, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='valid'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)
    l2 = dropout(l2, p_drop_conv)

    l3 = rectify(T.dot(l2, w3))
    l3 = dropout(l3, p_drop_hidden)
    
    pyx = softmax(T.dot(l3, w_o))
    # l3a = rectify(conv2d(l2, w3))
    # l3b = max_pool_2d(l3a, (2, 2))
    # l3 = T.flatten(l3b, outdim=2)
    # l3 = dropout(l3, p_drop_conv)

    # problem happening here
    # l4 = rectify(T.dot(l3, w4))
    # l4 = dropout(l4, p_drop_hidden)

    # pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, pyx   
Author: youralien, Project: smarterboard-nn, Lines: 25, Source: convnet.py

Example 7: convolutional

def convolutional(X, X_test, input_shape, n_filters, filter_size):
	"""
	Implementation of a convolutional layer

	Parameters
	----------
	X
	X_test
	input_shape
	n_filters
	filter_size

	Note
	----
	The convolutions emulate border_mode='same': a 'full' convolution is
	computed and then cropped so that the output shape matches the input
	shape in the last two dimensions.
	"""

	filters_shape = (n_filters, input_shape[1], filter_size[0], filter_size[1])
	filters = theano.shared(
		numpy.random.uniform(low=-0.1, high=0.1, size=filters_shape).astype(numpy.float32),
		'conv_filters'
	)

	output_shape = (input_shape[0], n_filters, input_shape[2], input_shape[3])

	output = conv2d(input=X, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')
	output_test = conv2d(input=X_test, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')

	shift_x = (filter_size[0] - 1) // 2
	shift_y = (filter_size[1] - 1) // 2

	output = output[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]
	output_test = output_test[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]

	return output, output_test, [filters], output_shape
Author: tfjgeorge, Project: ift6268, Lines: 35, Source: layers.py
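A hypothetical usage of the convolutional() helper above, assuming the function is in scope; the batch size, image size, and filter count are illustrative assumptions rather than values from the original project.

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
X_test = T.tensor4('X_test')
input_shape = (16, 3, 32, 32)   # (batch, channels, rows, cols)

out, out_test, params, out_shape = convolutional(
    X, X_test, input_shape, n_filters=8, filter_size=(5, 5))

f = theano.function([X], out)
x = np.zeros(input_shape, dtype=theano.config.floatX)
print(f(x).shape)   # (16, 8, 32, 32): spatial size preserved by the crop
print(out_shape)    # (16, 8, 32, 32)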

Example 8: LCN

def LCN(data, kernel_shape):
    
    # X = T.ftensor4()

    filter_shape = (1, 1, kernel_shape, kernel_shape)
    filters = sharedX(gaussian_filter(kernel_shape).reshape(filter_shape))
    
    convout = conv2d(data, filters=filters, border_mode='full')
    
    # For each pixel, remove mean of 9x9 neighborhood
    mid = int(np.floor(kernel_shape/ 2.))
    centered_X = data - convout[:,:,mid:-mid,mid:-mid]
    
    # Scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_XX = conv2d(T.sqr(data), filters=filters, border_mode='full')
    
    denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid])
    per_img_mean = denom.mean(axis = [2,3])
    divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
    
    new_X = centered_X / T.maximum(1., divisor)
    # new_X = new_X[:,:,mid:-mid, mid:-mid]

    new_X = T.extra_ops.squeeze(new_X)  # remove broadcastable dimension
    new_X = new_X[:, 0, :, :]  # TODO: check whether this forced squeeze is good

    return new_X
Author: fengjiran, Project: sparse_filtering, Lines: 27, Source: scaling.py
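The snippet above relies on two helpers, sharedX and gaussian_filter, that are not shown here. Plausible minimal versions (assumptions added for completeness, not the project's actual implementations) might look like this:

import numpy as np
import theano

def sharedX(value, name=None, borrow=False):
    # Wrap a numpy array as a theano shared variable in floatX precision.
    return theano.shared(np.asarray(value, dtype=theano.config.floatX),
                         name=name, borrow=borrow)

def gaussian_filter(kernel_shape):
    # Square 2-D Gaussian kernel of side kernel_shape, normalised to sum to 1.
    sigma = kernel_shape / 4.0
    ax = np.arange(kernel_shape) - (kernel_shape - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return kernel / kernel.sum()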

Example 9: __init__

    def __init__(self, rng, input_A, input_B, filter_shape, image_shape, poolsize=(2, 2)):

        print image_shape
        print filter_shape
        assert image_shape[1] == filter_shape[1]

        #calc the W_bound and init the W
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype = theano.config.floatX),
                        borrow = True)

        b_value = numpy.zeros((filter_shape[0],), 
                              dtype = theano.config.floatX)
        self.b = theano.shared(value = b_value, borrow = True)


        conv_out_A = conv.conv2d(input = input_A, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        conv_out_B = conv.conv2d(input = input_B, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        pooled_out_A = downsample.max_pool_2d(input = conv_out_A,
                                ds = poolsize, ignore_border = True)
        pooled_out_B = downsample.max_pool_2d(input = conv_out_B,
                                ds = poolsize, ignore_border = True)


        self.output_A = T.tanh(pooled_out_A + self.b.dimshuffle('x',0,'x','x'))
        self.output_B = T.tanh(pooled_out_B + self.b.dimshuffle('x',0,'x','x'))

        self.params = [self.W, self.b]
Author: PiscesDream, Project: Lab_Models, Lines: 35, Source: lfw_cnn.py
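To make the weight-initialisation bound above concrete, here is a worked instance; the filter shape and pool size are illustrative assumptions, not values from the quoted project.

# filter_shape = (20, 1, 5, 5), poolsize = (2, 2)
# fan_in  = 1 * 5 * 5              = 25
# fan_out = 20 * 5 * 5 / (2 * 2)   = 125
# W_bound = sqrt(6. / (25 + 125))  = 0.2
# so each entry of W is drawn uniformly from [-0.2, 0.2]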

Example 10: LCNinput

def LCNinput(data, kernel_shape):
    
    X = T.ftensor4()
    filter_shape = (1, 1, kernel_shape, kernel_shape)
    filters = sharedX(gaussian_filter(kernel_shape).reshape(filter_shape))
    
    convout = conv2d(X, filters=filters, border_mode='full')
    
    # For each pixel, remove mean of 9x9 neighborhood
    mid = int(np.floor(kernel_shape/ 2.))
    centered_X = X - convout[:,:,mid:-mid,mid:-mid]
    
    # Scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_XX = conv2d(T.sqr(X), filters=filters, border_mode='full')
    
    denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid])
    per_img_mean = denom.mean(axis = [2,3])
    divisor = T.largest(per_img_mean.dimshuffle(0,1, 'x', 'x'), denom)
    
    new_X = centered_X / T.maximum(1., divisor)
    # new_X = new_X[:,:,mid:-mid, mid:-mid]
    
    f = theano.function([X], new_X)
    
    return f(data)
Author: fengjiran, Project: sparse_filtering, Lines: 25, Source: scaling.py

Example 11: get_fprop_fn

def get_fprop_fn(variable_shape=False, include_pool=True):
    """
    build a theano function that use SAE weights to get convolved(or pooled if
    include_pool is True) features from a given input
    """
    conf = utils.get_config()
    paths = utils.get_paths()
    ae = serial.load(paths['sae']['model'])
    cnn_layer = 'cnn_layer_%i' % (conf['cnn_layers'])
    batch_size = conf[cnn_layer]['batch_size']
    nhid = conf['sae']['nhid']
    patch_size = conf['patch_size']
    region_size = conf['region_size']

    input = T.tensor4('input')
    filter_shape = (nhid, 1, patch_size, patch_size)
    filters = theano.shared(ae.get_weights().T.reshape(filter_shape))

    if variable_shape:
        out = conv.conv2d(input, filters)
    else:
        image_shape = [batch_size, 1, region_size, region_size]
        out = conv.conv2d(input, filters, filter_shape=filter_shape,
                          image_shape=image_shape)

    if include_pool:
        pool_fn = getattr(out, conf['pool_fn'])
        out = pool_fn(axis=(2, 3))
    return theano.function([input], out)
Author: johnarevalo, Project: cnn-bcdr, Lines: 29, Source: fe_extraction.py

Example 12: apply

    def apply(self, dataset, can_fit=True):
        x = dataset.get_design_matrix()

        denseX = T.matrix(dtype=x.dtype)

        image_shape = (len(x),) + self.img_shape
        X = denseX.reshape(image_shape)
        filters = gaussian_filter_9x9().reshape((1,1,9,9))

        convout = conv.conv2d(input = X,
                             filters = filters,
                             image_shape = image_shape,
                             filter_shape = (1, 1, 9, 9),
                             border_mode='full')

        # For each pixel, remove mean of 9x9 neighborhood
        centered_X = X - convout[:,:,4:-4,4:-4]
        
        # Scale down norm of 9x9 patch if norm is bigger than 1
        sum_sqr_XX = conv.conv2d(input = centered_X**2,
                             filters = filters,
                             image_shape = image_shape,
                             filter_shape = (1, 1, 9, 9),
                             border_mode='full')
        denom = T.sqrt(sum_sqr_XX[:,:,4:-4,4:-4])
        per_img_mean = T.mean(T.flatten(denom, outdim=3), axis=2)
        divisor = T.largest(per_img_mean.dimshuffle((0,1,'x','x')), denom)

        new_X = centered_X / divisor
        new_X = T.flatten(new_X, outdim=2)

        f = theano.function([denseX], new_X)
        dataset.set_design_matrix(f(x))
Author: HoldenCaulfieldRye, Project: caffe-1, Lines: 33, Source: preproc.py

Example 13: initialise

	def initialise(self):
		activation = self.activation
		rng = np.random.RandomState(235)
		inpt = self.inpt
		# initialise layer 1 weight vector. 
		#w_shp = (self.no_of_filters, 1.,self.in_channels, self.filter_length)
		w_shp = (self.no_of_filters, self.in_channels, self.filter_length, 1)
		w_bound = np.sqrt(self.in_channels* self.filter_length)
		W = theano.shared(value = np.asarray(
        rng.normal(0.,0.001,size=w_shp),
            dtype=inpt.dtype), name =self.param_names[0],borrow = True)
		b_shp = (self.no_of_filters,)
		b = theano.shared(value = np.asarray(
            rng.uniform(low=-.0, high=.0, size=b_shp),
            dtype=inpt.dtype), name =self.param_names[1],borrow = True)
		upsampled = self.inpt.repeat(int(self.pool),axis = 2)
		conv_out = conv.conv2d(upsampled, W.dimshuffle(0,3,2,1),subsample=(1,1),border_mode = "full")
		conv_out = conv_out[:,:,:,int(self.in_channels-1):-int(self.in_channels-1)]
		self.params = [W,b]
		if self.distribution==True:
			W_sigma = theano.shared(value = np.asarray(
	        rng.normal(0.,0.001,size=w_shp),
	            dtype=inpt.dtype), name ='lik_sigma',borrow = True)
			b_sigma = theano.shared(value = np.asarray(
	            rng.uniform(low=-.0, high=.0, size=b_shp),
	            dtype=inpt.dtype), name ='b_sigm',borrow = True)
			#self.output =conv_out + b.dimshuffle('x', 0, 'x', 'x')
			conv_out_sigma = conv.conv2d(upsampled, W_sigma.dimshuffle(0,3,2,1),subsample=(1,1),border_mode = "full",)
			conv_out_sigma = conv_out_sigma[:,:,:,int(self.in_channels-1):-int(self.in_channels-1)]
			self.log_sigma = conv_out_sigma + b_sigma.dimshuffle('x', 0, 'x', 'x')
			self.params +=[W_sigma,b_sigma]
		if activation!=None:
			self.output = self.activation(conv_out + b.dimshuffle('x', 0, 'x', 'x')).astype(theano.config.floatX)
		else:
			self.output = conv_out + b.dimshuffle('x', 0, 'x', 'x').astype(theano.config.floatX)
Author: KyriacosShiarli, Project: SingNet, Lines: 35, Source: layers.py

Example 14: model

def model(X, w, w2, w3, w4, w5, w_o, b_h1, b_h2, b_o, p_drop_conv, p_drop_hidden):
  
    l1_lin  = conv2d(X, w, border_mode='full')+b_c1.dimshuffle('x', 0, 'x', 'x')
    l1a     = alpha_c1 * rectify(l1_lin) + (1.- alpha_c1) * T.tanh(l1_lin)
    l1      = max_pool_2d(l1a, (2, 2))
    l1      = dropout(l1, p_drop_conv)

    l2_lin = conv2d(l1, w2) + b_c2.dimshuffle('x', 0, 'x', 'x')
    l2a    = alpha_c2 * rectify(l2_lin) + (1. - alpha_c2) * T.tanh(l2_lin)
    l2     = max_pool_2d(l2a, (2, 2))
    l2     = dropout(l2, p_drop_conv)

    l3_lin = conv2d(l2, w3) + b_c3.dimshuffle('x', 0, 'x', 'x')
    l3a    = alpha_c3 * rectify(l3_lin) + ( 1 - alpha_c3) * T.tanh(l3_lin)
    l3b    = max_pool_2d(l3a, (2, 2))
    l3     = T.flatten(l3b, outdim=2)
    l3     = dropout(l3, p_drop_conv)

    l4_lin = T.dot(l3, w4) + b_h1 
    l4 = alpha_h1 * rectify(l4_lin) + (1.-alpha_h1) * T.tanh(l4_lin)
    l4 = dropout(l4, p_drop_hidden)

    l5_lin = T.dot(l4, w5) + b_h2
    l5 = alpha_h2 * rectify(l5_lin) + (1. - alpha_h2) * T.tanh(l5_lin)
    l5 = dropout(l5, p_drop_hidden)

    pyx = softmax(T.dot(l5, w_o) + b_o )
    return l1, l2, l3, l4, l5, pyx
Author: rjbashar, Project: learningActivations, Lines: 28, Source: z_alpha_node_3c_2h.py
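The function above also references alpha_c1..alpha_c3, alpha_h1, alpha_h2 and b_c1..b_c3, which are defined elsewhere in the project. One plausible way to set them up (the 0.5 initial mix and the 32/64/128 filter counts are assumptions for illustration only) is:

import numpy as np
import theano

floatX = theano.config.floatX

def mix_param(name, value=0.5):
    # Scalar that blends the activations: alpha * rectify(x) + (1 - alpha) * tanh(x)
    return theano.shared(np.asarray(value, dtype=floatX), name=name)

alpha_c1, alpha_c2, alpha_c3 = [mix_param('alpha_c%d' % i) for i in (1, 2, 3)]
alpha_h1, alpha_h2 = mix_param('alpha_h1'), mix_param('alpha_h2')

# One bias per convolutional filter; the filter counts are assumed.
b_c1 = theano.shared(np.zeros(32, dtype=floatX), name='b_c1')
b_c2 = theano.shared(np.zeros(64, dtype=floatX), name='b_c2')
b_c3 = theano.shared(np.zeros(128, dtype=floatX), name='b_c3')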

Example 15: model3

def model3(X, w, w2, w22, w222, w3, w4, w_o, p_drop_conv, p_drop_hidden):
  l1a = rectify(conv2d(X, w, border_mode='full'))
  l1 = max_pool_2d(l1a, (2, 2))
  l1 = dropout(l1, p_drop_conv)

  l2a = rectify(conv2d(l1, w2))
  l2 = max_pool_2d(l2a, (2, 2))
  l2 = dropout(l2, p_drop_conv)

  l22a = rectify(conv2d(l2, w22))
  l22 = max_pool_2d(l22a, (2, 2))
  l22 = dropout(l22, p_drop_conv)

  l222a = rectify(conv2d(l22, w222))
  l222 = max_pool_2d(l222a, (2, 2))
  l222 = dropout(l222, p_drop_conv)

  l3a = rectify(conv2d(l222, w3))
  l3b = max_pool_2d(l3a, (2, 2))
  l3 = T.flatten(l3b, outdim=2)
  l3 = dropout(l3, p_drop_conv)

  l4 = rectify(T.dot(l3, w4))
  l4 = dropout(l4, p_drop_hidden)

  pyx = softmax(T.dot(l4, w_o))
  return l1, l2, l22, l222, l3, l4, pyx
Author: deccs, Project: ndsb_theano, Lines: 27, Source: conv_net.py


Note: The theano.tensor.nnet.conv.conv2d examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please follow each project's license when redistributing or using the code. Do not reproduce this article without permission.