

Python conv.ConvolutionalSequence Class Code Examples

This article collects typical usage examples of the Python class blocks.bricks.conv.ConvolutionalSequence. If you are wondering what ConvolutionalSequence is for and how to use it in practice, the curated class examples below should help.


Fifteen code examples of the ConvolutionalSequence class are shown below, sorted by popularity by default. They all follow the same basic pattern: build a ConvolutionalSequence from convolution, activation, and pooling bricks, initialize it, and apply it to a 4D image batch. A minimal sketch of that pattern is given right after this paragraph.
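The sketch below is only for orientation, assuming blocks and Theano are installed; the brick names and layer sizes are illustrative and not taken from any of the listed projects.

from theano import tensor
from blocks.bricks import Rectifier
from blocks.bricks.conv import Convolutional, ConvolutionalSequence, MaxPooling
from blocks.initialization import Constant, IsotropicGaussian

x = tensor.tensor4('x')
seq = ConvolutionalSequence(
    [Convolutional(filter_size=(3, 3), num_filters=8, name='conv'),
     Rectifier(),
     MaxPooling((2, 2), name='pool')],
    num_channels=1,
    image_size=(28, 28),
    weights_init=IsotropicGaussian(0.01),
    biases_init=Constant(0))
seq.initialize()

y = seq.apply(x)              # 4D output: (batch, channels, height, width)
print(seq.get_dim('output'))  # (8, 13, 13) for a 28x28 single-channel input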

Example 1: net_dvc

def net_dvc(image_size=(32,32)):
    convos = [5,5,5]
    pools = [2,2,2]
    filters = [100,200,300]

    tuplify = lambda x: (x,x)
    convos = list(map(tuplify, convos))
    conv_layers = [Convolutional(filter_size=s,num_filters=o, num_channels=i, name="Conv"+str(n))\
            for s,o,i,n in zip(convos, filters, [3] + filters, range(1000))]

    pool_layers = [MaxPooling(p) for p in map(tuplify, pools)]

    activations = [Rectifier() for i in convos]

    layers = [i for l in zip(conv_layers, activations, pool_layers) for i in l]

    cnn = ConvolutionalSequence(layers, 3,  image_size=image_size, name="cnn",
            weights_init=Uniform(width=.1),
            biases_init=Constant(0))

    cnn._push_allocation_config()
    cnn_output = np.prod(cnn.get_dim('output'))

    mlp_size = [cnn_output,500,2]
    mlp = MLP([Rectifier(), Softmax()], mlp_size,  name="mlp",
            weights_init=Uniform(width=.1),
            biases_init=Constant(0))

    seq = FeedforwardSequence([net.apply for net in [cnn,Flattener(),mlp]])
    seq.push_initialization_config()

    seq.initialize()
    return seq
Author: refnil, Project: ift6266h16, Lines: 33, Source: train.py
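The returned FeedforwardSequence chains the CNN, the Flattener, and the MLP, so it can be applied directly to an image batch. A hedged usage sketch (the variable names are mine, not from the ift6266h16 project):

from theano import tensor

x = tensor.tensor4('features')
seq = net_dvc(image_size=(32, 32))
y_hat = seq.apply(x)  # (batch_size, 2) class probabilities from the final Softmax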

Example 2: VGGNet

class VGGNet(FeedforwardSequence, Initializable):

    def __init__(self, image_dimension, **kwargs):

        layers = []
        
        #############################################
        # a first block with 2 convolutions of 32 (3, 3) filters
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 32, border_mode='half'))
        layers.append(Rectifier())

        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 2nd block with 3 convolutions of 64 (3, 3) filters
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 64, border_mode='half'))
        layers.append(Rectifier())
        
        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        #############################################
        # a 3rd block with 4 convolutions of 128 (3, 3) filters
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        layers.append(Convolutional((3, 3), 128, border_mode='half'))
        layers.append(Rectifier())
        
        # maxpool with size=(2, 2)
        layers.append(MaxPooling((2, 2)))

        self.conv_sequence = ConvolutionalSequence(layers, 3, image_size=image_dimension)

        flattener = Flattener()

        self.top_mlp = BatchNormalizedMLP(activations=[Rectifier(), Logistic()], dims=[500, 1])

        application_methods = [self.conv_sequence.apply, flattener.apply, self.top_mlp.apply]

        super(VGGNet, self).__init__(application_methods, biases_init=Constant(0), weights_init=Uniform(width=.1), **kwargs)


    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        print(conv_out_dim)
        
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp.dims
Author: tfjgeorge, Project: ift6266, Lines: 60, Source: vggnet_bricks_bn.py
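A hedged sketch of how this brick might be instantiated; the image size and variable names are my own assumptions rather than code from the ift6266 repository:

from theano import tensor

x = tensor.tensor4('features')
vgg = VGGNet(image_dimension=(128, 128))
vgg.initialize()      # pushes the allocation config, then initializes all weights
y_hat = vgg.apply(x)  # (batch_size, 1) output of the Logistic top layer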

Example 3: test_convolutional_sequence_with_no_input_size

def test_convolutional_sequence_with_no_input_size():
    # suppose x is outputted by some RNN
    x = tensor.tensor4('x')
    filter_size = (1, 1)
    num_filters = 2
    num_channels = 1
    pooling_size = (1, 1)
    conv = Convolutional(filter_size, num_filters, tied_biases=False,
                         weights_init=Constant(1.), biases_init=Constant(1.))
    act = Rectifier()
    pool = MaxPooling(pooling_size)

    bad_seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                    tied_biases=False)
    assert_raises_regexp(ValueError, r'Cannot infer bias size \S+',
                         bad_seq.initialize)

    seq = ConvolutionalSequence([conv, act, pool], num_channels,
                                tied_biases=True)
    try:
        seq.initialize()
        out = seq.apply(x)
    except TypeError:
        assert False, "This should have succeeded"

    assert out.ndim == 4
Author: SwordYork, Project: blocks, Lines: 26, Source: test_conv.py

Example 4: create_model_bricks

def create_model_bricks():
    convnet = ConvolutionalSequence(
        layers=[
            Convolutional(
                filter_size=(4, 4),
                num_filters=32,
                name='conv1'),
            SpatialBatchNormalization(name='batch_norm1'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=32,
                name='conv2'),
            SpatialBatchNormalization(name='batch_norm2'),
            Rectifier(),
            Convolutional(
                filter_size=(4, 4),
                num_filters=64,
                name='conv3'),
            SpatialBatchNormalization(name='batch_norm3'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=64,
                name='conv4'),
            SpatialBatchNormalization(name='batch_norm4'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                num_filters=128,
                name='conv5'),
            SpatialBatchNormalization(name='batch_norm5'),
            Rectifier(),
            Convolutional(
                filter_size=(3, 3),
                step=(2, 2),
                num_filters=128,
                name='conv6'),
            SpatialBatchNormalization(name='batch_norm6'),
            Rectifier(),
        ],
        num_channels=3,
        image_size=(64, 64),
        use_bias=False,
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='convnet')
    convnet.initialize()

    mlp = BatchNormalizedMLP(
        activations=[Rectifier(), Logistic()],
        dims=[numpy.prod(convnet.get_dim('output')), 1000, 40],
        weights_init=IsotropicGaussian(0.033),
        biases_init=Constant(0),
        name='mlp')
    mlp.initialize()

    return convnet, mlp
Author: anirudh9119, Project: discgen, Lines: 60, Source: train_celeba_classifier.py
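Because the convnet and the MLP are returned as separate bricks, the caller still has to flatten the convolutional output before the MLP, as the other examples on this page do. A hedged sketch of that wiring (an assumption about the calling code, not part of the discgen snippet):

from theano import tensor
from blocks.bricks.conv import Flattener

x = tensor.tensor4('features')
convnet, mlp = create_model_bricks()
y_hat = mlp.apply(Flattener().apply(convnet.apply(x)))  # (batch_size, 40) attribute probabilities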

Example 5: build_model_mnist

def build_model_mnist():

    # CNN
    filter_size = (5, 5)
    activation = Rectifier().apply
    pooling_size = (2, 2)
    num_filters = 50
    layer0 = ConvolutionalLayer(activation=activation, filter_size=filter_size, num_filters=num_filters,
                              pooling_size=pooling_size,
                              weights_init=Uniform(width=0.1),
                              biases_init=Uniform(width=0.01), name="layer_0")

    filter_size = (3, 3)
    activation = Rectifier().apply
    num_filters = 20
    layer1 = ConvolutionalLayer(activation=activation, filter_size=filter_size, num_filters=num_filters,
                              pooling_size=pooling_size,
                              weights_init=Uniform(width=0.1),
                              biases_init=Uniform(width=0.01), name="layer_1")

    conv_layers = [layer0, layer1]
    convnet = ConvolutionalSequence(conv_layers, num_channels= 1,
                                    image_size=(28, 28))

    convnet.initialize()
    output_dim = np.prod(convnet.get_dim('output'))
    mlp = MLP(activations=[Identity()], dims=[output_dim, 10],
                        weights_init=Uniform(width=0.1),
                        biases_init=Uniform(width=0.01), name="layer_2")
    mlp.initialize()

    classifier = Classifier(convnet, mlp)
    classifier.initialize()
    return classifier
Author: mducoffe, Project: Comparison_numpy_slice, Lines: 34, Source: test_mnist_hdf5.py

Example 6: LeNet

class LeNet(FeedforwardSequence, Initializable):

    def __init__(self, conv_activations, num_channels, image_shape,
                 filter_sizes, feature_maps, pooling_sizes,
                 top_mlp_activations, top_mlp_dims,
                 conv_step=None, border_mode='valid', **kwargs):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = zip(filter_sizes, feature_maps)

        # Construct convolutional layers with corresponding parameters
        self.layers = list(interleave([
            (Convolutional(filter_size=filter_size,
                           num_filters=num_filter,
                           step=self.conv_step,
                           border_mode=self.border_mode,
                           name='conv_{}'.format(i))
             for i, (filter_size, num_filter)
             in enumerate(conv_parameters)),
            conv_activations,
            (MaxPooling(size, name='pool_{}'.format(i))
             for i, size in enumerate(pooling_sizes))]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels,
                                                   image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply,
                               self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)

    @property
    def output_dim(self):
        return self.top_mlp_dims[-1]

    @output_dim.setter
    def output_dim(self, value):
        self.top_mlp_dims[-1] = value

    def _push_allocation_config(self):
        self.conv_sequence._push_allocation_config()
        conv_out_dim = self.conv_sequence.get_dim('output')

        self.top_mlp.activations = self.top_mlp_activations
        self.top_mlp.dims = [numpy.prod(conv_out_dim)] + self.top_mlp_dims
Author: TLESORT, Project: Algorithme-Apprentissage, Lines: 59, Source: CNN.py
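A hedged sketch of how LeNet might be instantiated with MNIST-like hyperparameters; all of the values below are illustrative assumptions, not taken from the Algorithme-Apprentissage repository:

from blocks.bricks import Rectifier, Softmax
from blocks.initialization import Constant, Uniform

lenet = LeNet(conv_activations=[Rectifier(), Rectifier()],
              num_channels=1,
              image_shape=(28, 28),
              filter_sizes=[(5, 5), (5, 5)],
              feature_maps=[20, 50],
              pooling_sizes=[(2, 2), (2, 2)],
              top_mlp_activations=[Rectifier(), Softmax()],
              top_mlp_dims=[500, 10],
              weights_init=Uniform(width=0.2),
              biases_init=Constant(0))
lenet.initialize()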

Example 7: test_convolutional_sequence_with_raw_activation

def test_convolutional_sequence_with_raw_activation():
    seq = ConvolutionalSequence([Rectifier()], num_channels=4,
                                image_size=(20, 14))
    input_ = (((numpy.arange(2 * 4 * 20 * 14)
                .reshape((2, 4, 20, 14)) % 2) * 2 - 1)
              .astype(theano.config.floatX))
    expected_ = input_ * (input_ > 0)
    x = theano.tensor.tensor4()
    assert_allclose(seq.apply(x).eval({x: input_}), expected_)
Author: SwordYork, Project: blocks, Lines: 9, Source: test_conv.py

Example 8: test_convolutional_sequence_tied_biases_not_pushed_if_not_explicitly_set

def test_convolutional_sequence_tied_biases_not_pushed_if_not_explicitly_set():
    cnn = ConvolutionalSequence(
        sum([[Convolutional(filter_size=(1, 1), num_filters=1,
                            tied_biases=True), Rectifier()]
             for _ in range(3)], []),
        num_channels=1, image_size=(1, 1))
    cnn.allocate()
    assert [child.tied_biases for child in cnn.children
            if isinstance(child, Convolutional)]
Author: SwordYork, Project: blocks, Lines: 9, Source: test_conv.py

Example 9: test_pooling_works_in_convolutional_sequence

def test_pooling_works_in_convolutional_sequence():
    x = tensor.tensor4('x')
    brick = ConvolutionalSequence([AveragePooling((2, 2), step=(2, 2)),
                                   MaxPooling((4, 4), step=(2, 2),
                                              ignore_border=True)],
                                  image_size=(16, 32), num_channels=3)
    brick.allocate()
    y = brick.apply(x)
    out = y.eval({x: numpy.empty((2, 3, 16, 32), dtype=theano.config.floatX)})
    assert out.shape == (2, 3, 3, 7)
Author: SwordYork, Project: blocks, Lines: 10, Source: test_conv.py
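For reference, the asserted shape follows from the usual pooling arithmetic (my worked numbers, not part of the original test): AveragePooling with size (2, 2) and step (2, 2) maps the (16, 32) input to (8, 16), and MaxPooling with size (4, 4), step (2, 2) and ignore_border=True then gives floor((8 - 4) / 2) + 1 = 3 rows and floor((16 - 4) / 2) + 1 = 7 columns, hence the output shape (2, 3, 3, 7).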

Example 10: test_convolutional_transpose_original_size_inferred_conv_sequence

def test_convolutional_transpose_original_size_inferred_conv_sequence():
    brick = ConvolutionalTranspose(filter_size=(4, 5), num_filters=10,
                                   step=(3, 2))

    seq = ConvolutionalSequence([brick], num_channels=5, image_size=(6, 9))
    try:
        seq.allocate()
    except Exception as e:
        raise AssertionError('exception raised: {}: {}'.format(
            e.__class__.__name__, e))
Author: SwordYork, Project: blocks, Lines: 10, Source: test_conv.py

Example 11: test_convolutional_sequence_use_bias

def test_convolutional_sequence_use_bias():
    cnn = ConvolutionalSequence(
        sum([[Convolutional(filter_size=(1, 1), num_filters=1), Rectifier()]
             for _ in range(3)], []),
        num_channels=1, image_size=(1, 1),
        use_bias=False)
    cnn.allocate()
    x = tensor.tensor4()
    y = cnn.apply(x)
    params = ComputationGraph(y).parameters
    assert len(params) == 3 and all(param.name == 'W' for param in params)
Author: SwordYork, Project: blocks, Lines: 11, Source: test_conv.py

Example 12: test_convolutional_sequence_use_bias

def test_convolutional_sequence_use_bias():
    cnn = ConvolutionalSequence(
        [ConvolutionalActivation(activation=Rectifier().apply, filter_size=(1, 1), num_filters=1) for _ in range(3)],
        num_channels=1,
        image_size=(1, 1),
        use_bias=False,
    )
    cnn.allocate()
    x = tensor.tensor4()
    y = cnn.apply(x)
    params = ComputationGraph(y).parameters
    assert len(params) == 3 and all(param.name == "W" for param in params)
Author: piergiaj, Project: blocks, Lines: 12, Source: test_conv.py

Example 13: test_convolutional_sequence_with_convolutions_raw_activation

def test_convolutional_sequence_with_convolutions_raw_activation():
    seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         Rectifier(),
         Convolutional(filter_size=(5, 5), num_filters=3, step=(2, 2)),
         Tanh()],
        num_channels=2,
        image_size=(21, 39))
    seq.allocate()
    x = theano.tensor.tensor4()
    out = seq.apply(x).eval({x: numpy.ones((10, 2, 21, 39),
                                           dtype=theano.config.floatX)})
    assert out.shape == (10, 3, 8, 17)
Author: SwordYork, Project: blocks, Lines: 13, Source: test_conv.py

Example 14: test_batch_normalization_inside_convolutional_sequence

def test_batch_normalization_inside_convolutional_sequence():
    """Test that BN bricks work in ConvolutionalSequences."""
    conv_seq = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         BatchNormalization(broadcastable=(False, True, True)),
         AveragePooling(pooling_size=(2, 2)),
         BatchNormalization(broadcastable=(False, False, False)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq_no_bn = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=4),
         AveragePooling(pooling_size=(2, 2)),
         MaxPooling(pooling_size=(2, 2), step=(1, 1))],
        weights_init=Constant(1.),
        biases_init=Constant(2.),
        image_size=(10, 8), num_channels=9)

    conv_seq.initialize()
    conv_seq_no_bn.initialize()
    rng = numpy.random.RandomState((2015, 12, 17))
    input_ = random_unif(rng, (2, 9, 10, 8))

    x = theano.tensor.tensor4()
    ybn = conv_seq.apply(x)
    y = conv_seq_no_bn.apply(x)
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}))

    std = conv_seq.children[-2].population_stdev
    std.set_value(3 * std.get_value(borrow=True))
    yield (assert_equal, ybn.eval({x: input_}), y.eval({x: input_}) / 3.)
Author: abdulqayyum, Project: blocks, Lines: 33, Source: test_bn.py

Example 15: __init__

    def __init__(
        self,
        conv_activations,
        num_channels,
        image_shape,
        filter_sizes,
        feature_maps,
        pooling_sizes,
        top_mlp_activations,
        top_mlp_dims,
        conv_step=None,
        border_mode="valid",
        **kwargs
    ):
        if conv_step is None:
            self.conv_step = (1, 1)
        else:
            self.conv_step = conv_step
        self.num_channels = num_channels
        self.image_shape = image_shape
        self.top_mlp_activations = top_mlp_activations
        self.top_mlp_dims = top_mlp_dims
        self.border_mode = border_mode

        conv_parameters = list(zip(filter_sizes, feature_maps))  # a list, so both generators below can iterate it

        # Construct convolutional, activation, and pooling layers with corresponding parameters
        self.convolution_layer = (
            Convolutional(
                filter_size=filter_size,
                num_filters=num_filter,
                step=self.conv_step,
                border_mode=self.border_mode,
                name="conv_{}".format(i),
            )
            for i, (filter_size, num_filter) in enumerate(conv_parameters)
        )

        self.BN_layer = (BatchNormalization(name="bn_conv_{}".format(i)) for i, _ in enumerate(conv_parameters))

        self.pooling_layer = (MaxPooling(size, name="pool_{}".format(i)) for i, size in enumerate(pooling_sizes))

        self.layers = list(interleave([self.convolution_layer, self.BN_layer, conv_activations, self.pooling_layer]))

        self.conv_sequence = ConvolutionalSequence(self.layers, num_channels, image_size=image_shape)

        # Construct a top MLP
        self.top_mlp = MLP(top_mlp_activations, top_mlp_dims)

        # Construct a top batch normalized MLP
        # mlp_class = BatchNormalizedMLP
        # extra_kwargs = {'conserve_memory': False}
        # self.top_mlp = mlp_class(top_mlp_activations, top_mlp_dims, **extra_kwargs)

        # We need to flatten the output of the last convolutional layer.
        # This brick accepts a tensor of dimension (batch_size, ...) and
        # returns a matrix (batch_size, features)
        self.flattener = Flattener()
        application_methods = [self.conv_sequence.apply, self.flattener.apply, self.top_mlp.apply]
        super(LeNet, self).__init__(application_methods, **kwargs)
Author: jpilaul, Project: IFT6266_project, Lines: 60, Source: test7.py


注:本文中的blocks.bricks.conv.ConvolutionalSequence类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。