本文整理汇总了 Python 中 blocks.bricks.conv.ConvolutionalSequence.push_allocation_config 方法的典型用法代码示例。如果您正苦于以下问题:Python ConvolutionalSequence.push_allocation_config 方法的具体用法?Python ConvolutionalSequence.push_allocation_config 怎么用?Python ConvolutionalSequence.push_allocation_config 使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 blocks.bricks.conv.ConvolutionalSequence 的用法示例。
在下文中一共展示了ConvolutionalSequence.push_allocation_config方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_convolutional_sequence
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
def test_convolutional_sequence():
    """Check channel pushing and the forward pass of a two-layer stack.

    ``push_allocation_config`` must propagate each layer's output channel
    count into the next layer, and an all-ones input through constant
    weights must produce the analytically expected constant output.
    """
    input_var = tensor.tensor4('x')
    n_channels = 4
    pool = 3
    n_batch = 5
    act = Rectifier().apply

    first = ConvolutionalLayer(act, (3, 3), 5, (pool, pool),
                               weights_init=Constant(1.),
                               biases_init=Constant(5.))
    second = ConvolutionalActivation(act, (2, 2), 4,
                                     weights_init=Constant(1.))
    sequence = ConvolutionalSequence([first, second], n_channels,
                                     image_size=(17, 13))
    sequence.push_allocation_config()

    # The sequence pushes its own num_channels into the first layer and
    # the first layer's num_filters (5) into the second.
    assert first.num_channels == 4
    assert second.num_channels == 5

    second.convolution.use_bias = False
    out = sequence.apply(input_var)
    sequence.initialize()
    compute = function([input_var], out)

    ones = numpy.ones((n_batch, 4, 17, 13), dtype=theano.config.floatX)
    # Layer 1 per pixel: 3*3 window over 4 channels of ones with weight 1
    # plus bias 5 = 9*4 + 5; layer 2 sums a 2*2 window over 5 channels.
    expected = (numpy.ones((n_batch, 4, 4, 3)) *
                (9 * 4 + 5) * 4 * 5)
    assert_allclose(compute(ones), expected)
示例2: test_border_mode_not_pushed
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
def test_border_mode_not_pushed():
    """Check that ``border_mode`` is pushed only when set on the sequence.

    Without a sequence-level ``border_mode``, every child keeps its own
    setting (unset children default to 'valid'); with one, it overrides
    all children.
    """
    layers = [Convolutional(border_mode='full'),
              ConvolutionalActivation(Rectifier().apply),
              ConvolutionalActivation(Rectifier().apply, border_mode='valid'),
              ConvolutionalLayer(Rectifier().apply, border_mode='full')]

    # No sequence-level border_mode: per-child settings survive the push.
    plain = ConvolutionalSequence(layers)
    plain.push_allocation_config()
    for child, mode in zip(plain.children,
                           ['full', 'valid', 'valid', 'full']):
        assert child.border_mode == mode

    # Sequence-level border_mode overrides every child.
    overridden = ConvolutionalSequence(layers, border_mode='full')
    overridden.push_allocation_config()
    for child in overridden.children:
        assert child.border_mode == 'full'
示例3: create_model_brick
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
def create_model_brick():
    """Build and initialize the ALI model (encoder, decoder, discriminator).

    Returns the fully initialized ``ALI`` brick.  After the allocation
    config is pushed, the encoder's last layer and the decoder's output
    convolution get real, untied biases, and the decoder output bias is
    set to the log-odds of pixel marginals estimated from one data batch
    (presumably CIFAR-10, per ``create_cifar10_data_streams``).
    """
    # Encoder: conv / batch-norm / leaky-ReLU stack whose final layer
    # emits 2 * NLAT channels (parameters of the latent Gaussian).
    encoder_layers = [
        conv_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = ConvolutionalSequence(
        layers=encoder_layers, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, use_bias=False, name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # Decoder: transposed convolutions mapping an NLAT x 1 x 1 latent
    # back to image space, ending with a logistic output nonlinearity.
    decoder_layers = [
        conv_transpose_brick(4, 1, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(1, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder_mapping = ConvolutionalSequence(
        layers=decoder_layers, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

    # x-discriminator: allocation config is pushed eagerly so its output
    # dimension is available when sizing the joint discriminator below.
    x_disc_layers = [
        conv_brick(5, 1, 32), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 64), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 128), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 256), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    x_discriminator = ConvolutionalSequence(
        layers=x_disc_layers, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, name='x_discriminator')
    x_discriminator.push_allocation_config()

    # z-discriminator: 1x1 convolutions over the latent code.
    z_disc_layers = [
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    z_discriminator = ConvolutionalSequence(
        layers=z_disc_layers, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='z_discriminator')
    z_discriminator.push_allocation_config()

    # Joint discriminator consumes the concatenated x- and z-features.
    joint_layers = [
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=joint_layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ALI(encoder, decoder, discriminator,
              weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()
    # The output layers need real, untied biases even though the mappings
    # otherwise run bias-free (use_bias=False above).
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    ali.initialize()

    # Seed the decoder output bias with the log-odds of the pixel
    # marginals from one 500-example batch.
    raw_marginals, = next(
        create_cifar10_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(b_value)
    return ali
示例4: build_submodel
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
#.........这里部分代码省略.........
else:
# there's a bit of a mix of names because `Convolutional` takes
# a "step" argument, but `ConvolutionActivation` takes "conv_step" argument
kwargs['conv_step'] = filter_step
if (pool_size[0] == 0 and pool_size[1] == 0):
layer_conv = ConvolutionalActivation(activation=activation,
filter_size=filter_size,
num_filters=num_filters,
border_mode=border_mode,
name="layer_%d" % index,
**kwargs)
else:
if pool_step is None:
pass
else:
kwargs['pooling_step'] = tuple(pool_step)
layer_conv = ConvolutionalLayer(activation=activation,
filter_size=filter_size,
num_filters=num_filters,
border_mode=border_mode,
pooling_size=pool_size,
name="layer_%d" % index,
**kwargs)
conv_layers.append(layer_conv)
convnet = ConvolutionalSequence(conv_layers, num_channels=num_channels,
image_size=image_size,
weights_init=Uniform(width=0.1),
biases_init=Constant(0.0),
name="conv_section")
convnet.push_allocation_config()
convnet.initialize()
output_dim = np.prod(convnet.get_dim('output'))
output_conv = convnet.apply(output_conv)
output_conv = Flattener().apply(output_conv)
# FULLY CONNECTED
output_mlp = output_conv
full_layers = []
assert len(L_dim_full_layers) == len(L_activation_full)
assert len(L_dim_full_layers) + 1 == len(L_endo_dropout_full_layers)
assert len(L_dim_full_layers) + 1 == len(L_exo_dropout_full_layers)
# reguarding the batch dropout : the dropout is applied on the filter
# which is equivalent to the output dimension
# you have to look at the dropout_rate of the next layer
# that is why we throw away the first value of L_exo_dropout_full_layers
L_exo_dropout_full_layers = L_exo_dropout_full_layers[1:]
pre_dim = output_dim
print "When constructing the model, the output_dim of the conv section is %d." % output_dim
if len(L_dim_full_layers):
for (dim, activation_str,
dropout, index) in zip(L_dim_full_layers,
L_activation_full,
L_exo_dropout_full_layers,
range(len(L_dim_conv_layers),
len(L_dim_conv_layers)+
len(L_dim_full_layers))
):
示例5: create_model_brick
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
def create_model_brick():
    """Build and initialize the conditional ALI model.

    Returns the fully initialized ``ConditionalALI`` brick.  The encoder
    additionally consumes an NEMB-dimensional label embedding, and the
    joint discriminator's channel count accounts for that embedding.
    After pushing the allocation config, selected layers get real, untied
    biases and the decoder output bias is set to the log-odds of pixel
    marginals estimated from one data batch (presumably CelebA, per
    ``create_celeba_data_streams``).
    """
    # Encoder: conv / batch-norm / leaky-ReLU stack ending in 2 * NLAT
    # channels (parameters of the latent Gaussian).
    enc_layers = [
        conv_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = EncoderMapping(layers=enc_layers,
                                     num_channels=NUM_CHANNELS,
                                     n_emb=NEMB,
                                     image_size=IMAGE_SIZE,
                                     weights_init=GAUSSIAN_INIT,
                                     biases_init=ZERO_INIT,
                                     use_bias=False)
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # Decoder: transposed convolutions from the (latent + embedding)
    # code back to image space, ending with a logistic nonlinearity.
    dec_layers = [
        conv_transpose_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder = Decoder(
        layers=dec_layers, num_channels=NLAT + NEMB, image_size=(1, 1),
        use_bias=False, name='decoder_mapping')

    # x-discriminator: allocation config is pushed eagerly so its output
    # dimension is available when sizing the joint discriminator below.
    x_disc_layers = [
        conv_brick(2, 1, 64), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK)]
    x_discriminator = ConvolutionalSequence(
        layers=x_disc_layers, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, use_bias=False, name='x_discriminator')
    x_discriminator.push_allocation_config()

    # z-discriminator: 1x1 convolutions over the latent code.
    z_disc_layers = [
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK)]
    z_discriminator = ConvolutionalSequence(
        layers=z_disc_layers, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='z_discriminator')
    z_discriminator.push_allocation_config()

    # Joint discriminator consumes concatenated x-features, z-features
    # and the NEMB-dimensional label embedding.
    joint_layers = [
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=joint_layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0] +
                      NEMB),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZYJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ConditionalALI(encoder, decoder, discriminator,
                         n_cond=NCLASSES, n_emb=NEMB,
                         weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
                         name='ali')
    ali.push_allocation_config()
    # Selected layers get real biases even though the mappings otherwise
    # run bias-free (use_bias=False above).
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder.layers[-2].use_bias = True
    decoder.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()

    # Seed the decoder output bias with the log-odds of the pixel
    # marginals from one 500-example batch.
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder.layers[-2].b.set_value(b_value)
    return ali
示例6: function
# 需要导入模块: from blocks.bricks.conv import ConvolutionalSequence [as 别名]
# 或者: from blocks.bricks.conv.ConvolutionalSequence import push_allocation_config [as 别名]
decoder.initialize()
decoder_fun = function([z, y], decoder.apply(z, embeddings))
out = decoder_fun(z_hat, test_labels)
# Discriminator
layers = [
conv_brick(5, 1, 32), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(4, 2, 64), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(4, 1, 128), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(4, 2, 256), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(4, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
x_discriminator = ConvolutionalSequence(
layers=layers, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
name='x_discriminator')
x_discriminator.push_allocation_config()
layers = [
conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
z_discriminator = ConvolutionalSequence(
layers=layers, num_channels=NLAT, image_size=(1, 1), use_bias=False,
name='z_discriminator')
z_discriminator.push_allocation_config()
layers = [
conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
conv_brick(1, 1, 1)]
joint_discriminator = ConvolutionalSequence(
layers=layers,