

Python ConvolutionalSequence._push_allocation_config Method Code Examples

This article collects typical usage examples of the Python method blocks.bricks.conv.ConvolutionalSequence._push_allocation_config. If you are unsure what ConvolutionalSequence._push_allocation_config does or how to use it, the selected code examples below should help. You can also explore further usage examples of blocks.bricks.conv.ConvolutionalSequence.


Seven code examples of ConvolutionalSequence._push_allocation_config are shown below, ordered by popularity.
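All of the examples follow the same pattern: build a ConvolutionalSequence, push its allocation configuration down to the child bricks, and query the resulting output shape to size a top MLP. The following is a minimal sketch of that pattern distilled from the examples below; the layer sizes and the 3-channel 32x32 input are illustrative values, not taken from any of the listed projects.

import numpy
from blocks.bricks import Rectifier
from blocks.bricks.conv import Convolutional, ConvolutionalSequence, MaxPooling
from blocks.initialization import Constant, Uniform

# A toy one-block sequence over 3-channel 32x32 images.
conv_sequence = ConvolutionalSequence(
    [Convolutional((5, 5), 16, name='conv_0'), Rectifier(), MaxPooling((2, 2), name='pool_0')],
    num_channels=3, image_size=(32, 32),
    weights_init=Uniform(width=.1), biases_init=Constant(0))

# Propagate num_channels / image_size to the children so the output shape
# can be queried before any parameters are allocated.
conv_sequence._push_allocation_config()
output_shape = conv_sequence.get_dim('output')   # (16, 14, 14) for this configuration
flat_dim = numpy.prod(output_shape)              # feeds the first dimension of a top MLP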

Example 1: VGGNet

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
class VGGNet(FeedforwardSequence, Initializable):

    def __init__(self, image_dimension, **kwargs):

        layers = []
        
        #############################################
        # a first block with 2 convolutions of 32 (3, 3) filters
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 2nd block with 3 convolutions of 64 (3, 3) filters
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        
        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 3rd block with 4 convolutions of 128 (3, 3) filters
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        
        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(layers, 3, image_size=image_dimension)

        flattener = Flattener()

        self.top_mlp = BatchNormalizedMLP(activations=[Rectifier(), Logistic()], dims=[500, 1])

        application_methods = [self.conv_sequence.apply, flattener.apply, self.top_mlp.apply]

        super(VGGNet, self).__init__(application_methods, biases_init=Constant(0), weights_init=Uniform(width=.1), **kwargs)


    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        print(conv_out_dim)
        
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp.dims
Developer: tfjgeorge, Project: ift6266, Lines: 62, Source file: vggnet_bricks_bn.py
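For context, a hypothetical call site for the brick above might look as follows; the 64x64 image size and the variable names are illustrative and are not part of the original snippet.

from theano import tensor

x = tensor.ftensor4('images')
vgg = VGGNet(image_dimension=(64, 64))
vgg.initialize()       # triggers allocation, which pushes the allocation config
y_hat = vgg.apply(x)   # Logistic (sigmoid) output of the BatchNormalizedMLP on top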

Example 2: net_dvc

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
def net_dvc(image_size=(32,32)):
    convos = [5,5,5]
    pools = [2,2,2]
    filters = [100,200,300]

    tuplify = lambda x: (x,x)
    convos = list(map(tuplify, convos))
    conv_layers = [Convolutional(filter_size=s, num_filters=o, num_channels=i,
                                 name="Conv" + str(n))
                   for s, o, i, n in zip(convos, filters, [3] + filters, range(1000))]

    pool_layers = [MaxPooling(p) for p in map(tuplify, pools)]

    activations = [Rectifier() for i in convos]

    layers = [i for l in zip(conv_layers, activations, pool_layers) for i in l]

    cnn = ConvolutionalSequence(layers, 3,  image_size=image_size, name="cnn",
            weights_init=Uniform(width=.1),
            biases_init=Constant(0))

    cnn._push_allocation_config()
    cnn_output = np.prod(cnn.get_dim('output'))

    mlp_size = [cnn_output, 500, 2]
    mlp = MLP([Rectifier(), Softmax()], mlp_size, name="mlp",
              weights_init=Uniform(width=.1),
              biases_init=Constant(0))

    seq = FeedforwardSequence([net.apply for net in [cnn, Flattener(), mlp]])
    seq.push_initialization_config()

    seq.initialize()
    return seq
Developer: refnil, Project: ift6266h16, Lines: 35, Source file: train.py
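A hypothetical way to use the factory above; the 64x64 image size (instead of the 32x32 default) and the variable names are illustrative only.

from theano import tensor

x = tensor.ftensor4('images')
net = net_dvc(image_size=(64, 64))   # returns an already-initialized FeedforwardSequence
probabilities = net.apply(x)         # Softmax output of the top MLP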

Example 3: build_conv_layers

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
    def build_conv_layers(self, image=None):

        if image is None:
            image = T.ftensor4('spectrogram')

        conv_list = []
        for layer in range(self.layers):
            layer_param = self.params[layer]
            conv_layer = Convolutional(layer_param[0], layer_param[1], layer_param[2])
            pool_layer = MaxPooling(layer_param[3])

            conv_layer.name = "convolution"+str(layer)
            pool_layer.name = "maxpooling"+str(layer)

            conv_list.append(conv_layer)
            conv_list.append(pool_layer)
            conv_list.append(Rectifier())

        conv_seq = ConvolutionalSequence(
            conv_list,
            self.params[0][2],
            image_size=self.image_size,
            weights_init=IsotropicGaussian(std=0.5, mean=0),
            biases_init=Constant(0))

        conv_seq._push_allocation_config()
        conv_seq.initialize()
        out = conv_seq.apply(image)

        return out, conv_seq.get_dim('output')
Developer: olimastro, Project: ift6266, Lines: 34, Source file: convolution.py
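The method above indexes self.params[layer] positionally, so each entry has to bundle the per-layer hyperparameters. A hypothetical layout consistent with that indexing (inferred from the code, not taken from the original project) would be:

# Each entry: (filter_size, num_filters, num_channels, pooling_size).
# params[0][2] is also what gets passed to ConvolutionalSequence as the
# number of input channels.
params = [
    ((5, 5), 32, 1,  (2, 2)),   # layer 0: single-channel spectrogram input
    ((3, 3), 64, 32, (2, 2)),   # layer 1: num_channels matches the previous num_filters
]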

Example 4: LeNet

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
class LeNet(FeedforwardSequence, Initializable):

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
Developer: TLESORT, Project: Algorithme-Apprentissage, Lines: 61, Source file: CNN.py
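A hypothetical MNIST-sized instantiation of the class above, in the spirit of the blocks-examples LeNet script; every number here is illustrative rather than taken from the original project.

from blocks.bricks import Rectifier, Softmax
from blocks.initialization import Constant, Uniform

convnet = LeNet([Rectifier(), Rectifier()], 1, (28, 28),
                filter_sizes=[(5, 5), (5, 5)],
                feature_maps=[20, 50],
                pooling_sizes=[(2, 2), (2, 2)],
                top_mlp_activations=[Rectifier(), Softmax()],
                top_mlp_dims=[500, 10],
                weights_init=Uniform(width=.2),
                biases_init=Constant(0))
convnet.initialize()   # _push_allocation_config prepends the flattened conv output
                       # dimension (here 50 * 4 * 4 = 800) to top_mlp_dims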

Example 5: LeNet

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-like convolutional network.

    The class implements LeNet, which is a convolutional sequence with
    an MLP on top (several fully-connected layers). For details see
    [LeCun95]_.

    .. [LeCun95] LeCun, Yann, et al.
       *Comparison of learning algorithms for handwritten digit
       recognition.*,
       International conference on artificial neural networks. Vol. 60.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
    feature_maps : list
        Number of filters for each of convolutions.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    conv_step : tuples
        Step of convolution (similar for all layers).
    border_mode : str
        Border mode of convolution (similar for all layers).

    """
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(conv_activations, filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (ConvolutionalActivation(filter_size=filter_size,
                                     num_filters=num_filter,
                                     activation=activation.apply,
                                     step=self.conv_step,
                                     border_mode=self.border_mode,
                                     name='conv_{}'.format(i))
             for i, (activation, filter_size, num_filter)
             in enumerate(conv_parameters)),
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
Developer: halidanmu, Project: blocks-examples, Lines: 95, Source file: __init__.py

Example 6: LeNet

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
class LeNet(FeedforwardSequence, Initializable):
    """LeNet-like convolutional network.

    The class implements LeNet, which is a convolutional sequence with
    an MLP on top (several fully-connected layers). For details see
    [LeCun95]_.

    .. [LeCun95] LeCun, Yann, et al.
       *Comparison of learning algorithms for handwritten digit
       recognition.*,
       International conference on artificial neural networks. Vol. 60.

    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
    feature_maps : list
        Number of filters for each of convolutions.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    repeat_times : list of int
        How many times to repeat each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    stride : int
        Step of convolution for the first layer, 1 will be used
        for all other layers.
    border_mode : str
        Border mode of convolution (similar for all layers).
    batch_norm : str or None
        If set, insert batch normalization after every convolution; the value
        'mean-only' selects mean-only batch normalization.
    """
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes, repeat_times,
                 top_mlp_activations, top_mlp_dims,
                 stride, batch_norm, border_mode='valid', **kwargs):
        self.stride = stride
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        # Construct convolutional layers with corresponding parameters
        self.layers = []
        for i, activation in enumerate(conv_activations):
            for j in range(repeat_times[i]):
                self.layers.append(
                    Convolutional(
                        filter_size=filter_sizes[i], num_filters=feature_maps[i],
                        step=(1, 1) if i > 0 or j > 0 else (self.stride, self.stride),
                        border_mode=self.border_mode,
                        name='conv_{}_{}'.format(i, j)))
                if batch_norm:
                    self.layers.append(
                        BatchNormalization(broadcastable=(False, True, True),
                                        conserve_memory=True,
                                        mean_only=batch_norm == 'mean-only',
                                        name='bn_{}_{}'.format(i, j)))
                self.layers.append(activation)
            self.layers.append(MaxPooling(pooling_sizes[i], name='pool_{}'.format(i)))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims

    @application(inputs=['image'])
    def apply_5windows(self, image):
#......... part of the code is omitted here .........
Developer: rizar, Project: ift6266h16, Lines: 103, Source file: main.py
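A hypothetical instantiation of the variant above, showing the additional repeat_times, stride and batch_norm arguments; all values are illustrative and not taken from the original project.

from blocks.bricks import Rectifier, Softmax
from blocks.initialization import Constant, Uniform

convnet = LeNet([Rectifier(), Rectifier(), Rectifier()], 3, (128, 128),
                filter_sizes=[(7, 7), (5, 5), (3, 3)],
                feature_maps=[32, 64, 128],
                pooling_sizes=[(2, 2), (2, 2), (2, 2)],
                repeat_times=[1, 2, 2],     # how often each conv block is repeated
                top_mlp_activations=[Rectifier(), Softmax()],
                top_mlp_dims=[500, 2],
                stride=2,                   # only the very first convolution is strided
                batch_norm='mean-only',
                border_mode='half',
                weights_init=Uniform(width=.2),
                biases_init=Constant(0))
convnet.initialize()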

Example 7: LeNet

# Required import: from blocks.bricks.conv import ConvolutionalSequence [as alias]
# Or: from blocks.bricks.conv.ConvolutionalSequence import _push_allocation_config [as alias]
class LeNet(FeedforwardSequence, Initializable):
    '''
    Parameters
    ----------
    conv_activations : list of :class:`.Brick`
        Activations for convolutional network.
    num_channels : int
        Number of channels in the input image.
    image_shape : tuple
        Input image shape.
    filter_sizes : list of tuples
        Filter sizes of :class:`.blocks.conv.ConvolutionalLayer`.
    feature_maps : list
        Number of filters for each of convolutions.
    pooling_sizes : list of tuples
        Sizes of max pooling for each convolutional layer.
    top_mlp_activations : list of :class:`.blocks.bricks.Activation`
        List of activations for the top MLP.
    top_mlp_dims : list
        Numbers of hidden units and the output dimension of the top MLP.
    conv_step : tuples
        Step of convolution (similar for all layers).
    border_mode : str
        Border mode of convolution (similar for all layers).
    '''
    
    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
    
    @property
    def output_dim(self):
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
Developer: ift6266, Project: cats_dogs, Lines: 83, Source file: main_best_model.py


Note: The blocks.bricks.conv.ConvolutionalSequence._push_allocation_config examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project licenses. Do not reproduce without permission.