

Python downsample.max_pool_2d method code examples

This article collects typical usage examples of the Python method theano.tensor.signal.downsample.max_pool_2d. If you are wondering what downsample.max_pool_2d does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from its containing module, theano.tensor.signal.downsample.


The following presents 14 code examples of the downsample.max_pool_2d method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
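
Before the examples, here is a minimal, self-contained usage sketch of max_pool_2d. It assumes a Theano release that still ships theano.tensor.signal.downsample (newer releases moved this functionality to theano.tensor.signal.pool.pool_2d); the variable names are illustrative.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import downsample

x = T.tensor4('x')  # input laid out as (batch, channels, rows, cols)
pooled = downsample.max_pool_2d(x, ds=(2, 2), ignore_border=True)
f = theano.function([x], pooled)

img = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(f(img).shape)  # (1, 1, 2, 2)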

Example 1: model

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def model(X, w, w2, w3, w4, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))  # w_o: output-layer weights, not a parameter here (comes from the enclosing scope)
    return l1, l2, l3, l4, pyx 
Author: Newmu, Project: Theano-Tutorials, Lines of code: 21, Source: 5_convolutional_net.py

Example 2: __init__

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def __init__(self, input_layer, pool_size, feature_dim=1, implementation='max_pool'):
        """
        pool_size: the number of inputs to be pooled together.

        feature_dim: the dimension of the input to pool across. By default this is 1
        for both dense and convolutional layers (bc01).
        For c01b, this has to be set to 0.

        implementation:
            - 'max_pool': uses theano's max_pool_2d - doesn't work for input dimension > 1024!
            - 'reshape': reshapes the tensor to create a 'pool' dimension and then uses T.max.
        """
        self.pool_size = pool_size
        self.feature_dim = feature_dim
        self.implementation = implementation
        self.input_layer = input_layer
        self.input_shape = self.input_layer.get_output_shape()
        self.mb_size = self.input_layer.mb_size

        if self.input_shape[self.feature_dim] % self.pool_size != 0:
            raise "Feature dimension is not a multiple of the pool size. Doesn't work!"

        self.params = []
        self.bias_params = [] 
Author: benanne, Project: kaggle-galaxies, Lines of code: 26, Source: layers.py
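
As a standalone view of the 'reshape' implementation described in the docstring above, here is a minimal sketch. It assumes a bc01 input whose feature axis (axis 1) is divisible by pool_size; names and shapes are illustrative.

import numpy as np
import theano
import theano.tensor as T

pool_size = 2
x = T.tensor4('x')  # bc01 layout; pool groups of `pool_size` adjacent feature maps
# insert a pool axis after the feature axis, then take the max over it
pooled = T.max(
    x.reshape((x.shape[0], x.shape[1] // pool_size, pool_size, x.shape[2], x.shape[3])),
    axis=2)

f = theano.function([x], pooled)
data = np.random.randn(2, 8, 5, 5).astype(theano.config.floatX)
print(f(data).shape)  # (2, 4, 5, 5)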

Example 3: output

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def output(self, *args, **kwargs):
        input = self.input_layer.output(*args, **kwargs)

        if self.implementation == 'max_pool':
            # max_pool_2d operates on the last 2 dimensions of the input. So shift the feature dim to be last.
            shuffle_order = range(0, self.feature_dim) + range(self.feature_dim + 1, input.ndim) + [self.feature_dim]
            unshuffle_order = range(0, self.feature_dim) + [input.ndim - 1] + range(self.feature_dim, input.ndim - 1)

            input_shuffled = input.dimshuffle(*shuffle_order)
            output_shuffled = max_pool_2d(input_shuffled, (1, self.pool_size))
            output = output_shuffled.dimshuffle(*unshuffle_order)

        elif self.implementation == 'reshape':
            out_feature_dim_size = self.get_output_shape()[self.feature_dim]
            pool_shape = self.input_shape[:self.feature_dim] + (out_feature_dim_size, self.pool_size) + self.input_shape[self.feature_dim + 1:]
            
            input_reshaped = input.reshape(pool_shape)
            output = T.max(input_reshaped, axis=self.feature_dim + 1)
        else:
            raise "Uknown implementation string '%s'" % self.implementation

        return output 
Author: benanne, Project: kaggle-galaxies, Lines of code: 24, Source: layers.py
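
The 'max_pool' branch above relies on a dimshuffle trick: move the feature axis to the end so max_pool_2d (which pools the last two dimensions) can pool across it, then move it back. A minimal standalone sketch of that trick, assuming a bc01 input with the feature axis at position 1:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

pool_size = 2
x = T.tensor4('x')  # bc01 layout; pool groups of `pool_size` feature maps together
# move the feature axis to the end, pool with a (1, pool_size) window, move it back
shuffled = x.dimshuffle(0, 2, 3, 1)
pooled = max_pool_2d(shuffled, (1, pool_size), ignore_border=True)
out = pooled.dimshuffle(0, 3, 1, 2)

f = theano.function([x], out)
data = np.random.randn(2, 8, 5, 5).astype(theano.config.floatX)
print(f(data).shape)  # (2, 4, 5, 5)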

Example 4: predict

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = None#(batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear=="tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        if self.non_linear=="relu":
            conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Author: SenticNet, Project: personality-detection, Lines of code: 18, Source: conv_net_classes.py

Example 5: compute_output

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def compute_output(self, network, in_vw):
        mode = network.find_hyperparameter(["mode"])
        out_shape = in_vw.shape[:2]
        pool_size = in_vw.shape[2:]
        pooled = max_pool_2d(in_vw.variable,
                             ds=pool_size,
                             mode=mode,
                             # doesn't make a difference here,
                             # but allows using cuDNN
                             ignore_border=True)
        out_var = pooled.flatten(2)
        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        ) 
Author: SBU-BMI, Project: u24_lymphocyte, Lines of code: 19, Source: downsample.py
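
The same global-pooling idea can be sketched outside the framework above: pooling with a window equal to the full spatial extent collapses each feature map to a single value, and flatten(2) then yields a (batch, channels) output. The shapes below are illustrative assumptions.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.tensor4('x')  # (batch, channels, rows, cols)
# pool window = full spatial extent -> one value per feature map
pooled = max_pool_2d(x, ds=(6, 6), ignore_border=True, mode='max')
global_pool = pooled.flatten(2)  # -> (batch, channels)

f = theano.function([x], global_pool)
data = np.random.randn(3, 4, 6, 6).astype(theano.config.floatX)
print(f(data).shape)  # (3, 4)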

Example 6: local_response_normalization_pool

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def local_response_normalization_pool(in_vw, alpha, k, beta, n):
    """
    using built-in pooling, works for N-D tensors (2D/3D/etc.)
    """
    from theano.tensor.signal.downsample import max_pool_2d
    assert n % 2 == 1, "n must be odd"
    in_var = in_vw.variable
    batch_size, num_channels = in_vw.symbolic_shape()[:2]
    squared = T.sqr(in_var)
    reshaped = squared.reshape((batch_size, 1, num_channels, -1))
    pooled = max_pool_2d(input=reshaped,
                         ds=(n, 1),
                         st=(1, 1),
                         padding=(n // 2, 0),
                         ignore_border=True,
                         mode="average_inc_pad")
    unreshaped = pooled.reshape(in_vw.symbolic_shape())
    # multiply by n, since we did a mean pool instead of a sum pool
    return in_var / (((alpha * n) * unreshaped + k) ** beta) 
Author: SBU-BMI, Project: u24_lymphocyte, Lines of code: 21, Source: lrn.py
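
For reference, here is a plain NumPy version of the quantity the pooling trick above computes, namely b_c = a_c / (k + alpha * sum of a^2 over an n-wide channel window centered on c) ** beta. The function name and default values are illustrative assumptions; the window is truncated at the channel borders, which matches the zero-padded average pool used above.

import numpy as np

def lrn_reference(a, alpha=1e-4, k=2.0, beta=0.75, n=5):
    # a has layout (batch, channels, rows, cols); n must be odd
    assert n % 2 == 1, "n must be odd"
    sq = a ** 2
    out = np.empty_like(a)
    half = n // 2
    num_channels = a.shape[1]
    for i in range(num_channels):
        lo, hi = max(0, i - half), min(num_channels, i + half + 1)
        window_sum = sq[:, lo:hi].sum(axis=1)          # sum of squares over the channel window
        out[:, i] = a[:, i] / (k + alpha * window_sum) ** beta
    return out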

Example 7: predict

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def predict(self, new_data, batch_size):
        """
        predict for new data
        """
        img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
        conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
        if self.non_linear=="tanh":
            conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        if self.non_linear=="relu":
            conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
            output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
        else:
            pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
            output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return output 
Author: UKPLab, Project: deeplearning4nlp-tutorial, Lines of code: 18, Source: conv_net_classes.py

Example 8: local_response_normalization_2d_pool

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def local_response_normalization_2d_pool(in_vw, alpha, k, beta, n):
    """
    using built-in pooling
    """
    from theano.tensor.signal.downsample import max_pool_2d
    assert n % 2 == 1, "n must be odd"
    in_var = in_vw.variable
    b, ch, r, c = in_vw.symbolic_shape()
    squared = T.sqr(in_var)
    reshaped = squared.reshape((b, 1, ch, r * c))
    pooled = max_pool_2d(input=reshaped,
                         ds=(n, 1),
                         st=(1, 1),
                         padding=(n // 2, 0),
                         ignore_border=True,
                         mode="average_inc_pad")
    unreshaped = pooled.reshape((b, ch, r, c))
    # multiply by n, since we did a mean pool instead of a sum pool
    return in_var / (((alpha * n) * unreshaped + k) ** beta) 
Author: diogo149, Project: treeano, Lines of code: 21, Source: lrn.py

Example 9: get_output

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def get_output(self, train):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        output = downsample.max_pool_2d(Y, ds=self.pool_size, st=self.stride, ignore_border=self.ignore_border)
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape) #shape is (num_samples, num_timesteps, stack_size, new_nb_row, new_nb_col) 
Author: textclf, Project: fancy-cnn, Lines of code: 9, Source: convolutions.py
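
A self-contained sketch of the same collapse-then-restore reshape trick, assuming a 5D input laid out as (num_samples, num_timesteps, channels, rows, cols); names and shapes are illustrative.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal.downsample import max_pool_2d

x = T.TensorType(theano.config.floatX, (False,) * 5)('x')
# collapse (samples, timesteps) into one batch axis, pool, then restore
flat = T.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3], x.shape[4]))
pooled = max_pool_2d(flat, ds=(2, 2), ignore_border=True)
restored = T.reshape(pooled, (x.shape[0], x.shape[1],
                              pooled.shape[1], pooled.shape[2], pooled.shape[3]))

f = theano.function([x], restored)
data = np.random.randn(2, 3, 4, 8, 8).astype(theano.config.floatX)
print(f(data).shape)  # (2, 3, 4, 4, 4)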

Example 10: __init__

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def __init__(self, input, output_maps, input_maps, filter_height, filter_width, poolsize=(2,2)):
        self.input = input
        self.bound = np.sqrt(6./(input_maps*filter_height*filter_width + output_maps*filter_height*filter_width//np.prod(poolsize)))
        self.w = theano.shared(np.asarray(np.random.uniform(low=-self.bound,high=self.bound,size=(output_maps, input_maps, filter_height, filter_width)),dtype=input.dtype))
        self.b = theano.shared(np.asarray(np.random.uniform(low=-.5, high=.5, size=(output_maps)),dtype=input.dtype))
        self.conv_out = conv2d(input=self.input, filters=self.w)
        self.pooled_out = downsample.max_pool_2d(self.conv_out, ds=poolsize, ignore_border=True)
        self.output = T.tanh(self.pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')) 
Author: iamshang1, Project: Projects, Lines of code: 10, Source: convolutional_nn.py

Example 11: __init__

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def __init__(self, input, output_maps, input_maps, filter_height, filter_width, maxpool=None):
        self.input = input
        self.w = theano.shared(self.ortho_weights(output_maps,input_maps,filter_height,filter_width),borrow=True)
        self.b = theano.shared(np.zeros((output_maps,), dtype=theano.config.floatX),borrow=True)
        self.conv_out = conv2d(input=self.input, filters=self.w, border_mode='half')
        if maxpool:
            self.conv_out = downsample.max_pool_2d(self.conv_out, ds=maxpool, ignore_border=True)
        self.output = T.nnet.elu(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x')) 
Author: iamshang1, Project: Projects, Lines of code: 10, Source: convnet.py

Example 12: build_resnet

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def build_resnet(x, x_shape, features = 64):

    y0, y0_shape, p0 = conv(x, x_shape, (features,3,3,3))
    y1, y1_shape, p1 = batch_norm(y0, y0_shape)
    y2, y2_shape, _ = relu(y1, y1_shape)

    #first resnet block
    y3, y3_shape, p3 = resnet_bottleneck(y2, y2_shape, features)
    y4, y4_shape, p4 = resnet_bottleneck(y3, y3_shape, features)
    y5, y5_shape, p5 = resnet_bottleneck(y4, y4_shape, features)

    #second resnet block
    y6, y6_shape, p6 = resnet_bottleneck(y5, y5_shape, 2*features, 2)
    y7, y7_shape, p7 = resnet_bottleneck(y6, y6_shape, features)
    y8, y8_shape, p8 = resnet_bottleneck(y7, y7_shape, features)

    #third resnet block
    y9, y9_shape, p9 = resnet_bottleneck(y8, y8_shape, 4*features, 2)
    y10, y10_shape, p10 = resnet_bottleneck(y9, y9_shape, features)
    y11, y11_shape, p11 = resnet_bottleneck(y10, y10_shape, features)

    #8x8 average pooling 
    y12 = downsample.max_pool_2d(y11, (8,8), mode="average_inc_pad", ignore_border=True)
    y12_shape=(y11_shape[0], y11_shape[1],1,1)
    
    #classification layer (log-softmax)
    y13, y13_shape, p13 = conv(y12, y12_shape, (10, y12_shape[1], 1, 1))
    y14, y14_shape, _ = log_softmax(y13, y13_shape)
        
    return y14, y14_shape, p0+p1+p3+p4+p5+p6+p7+p8+p9+p10+p11+p13 
Author: j3xugit, Project: RaptorX-Contact, Lines of code: 32, Source: resnet.py

Example 13: _build_expression

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def _build_expression(self):
        self.input_ = T.tensor4(dtype=self.input_dtype)
        self.expression_ = max_pool_2d(self.input_, self.max_pool_stride,
                                       ignore_border=True) 
Author: sklearn-theano, Project: sklearn-theano, Lines of code: 6, Source: base.py

Example 14: set_inpt

# Required module: from theano.tensor.signal import downsample [as alias]
# Or: from theano.tensor.signal.downsample import max_pool_2d [as alias]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        pooled_out = downsample.max_pool_2d(
            input=conv_out, ds=self.poolsize, ignore_border=True)
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers 
Author: dalmia, Project: WannaPark, Lines of code: 12, Source: network3.py


Note: The theano.tensor.signal.downsample.max_pool_2d examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code; do not republish without permission.