This page collects typical usage examples of the Python function nnabla.functions.average_pooling. If you are wondering how average_pooling is called in practice, the curated code samples below may help.
The following shows 11 code examples of the average_pooling function, ordered by popularity by default.
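Before the full examples, here is a minimal sketch of calling average_pooling on its own. The input shape and variable names are illustrative, and the import aliases (np, nn, F, PF) are the ones all of the examples below assume.

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

# A 4-D NCHW input: a batch of 4 RGB 32x32 images (shape chosen for illustration).
x = nn.Variable((4, 3, 32, 32))
# 2x2 average pooling; the stride defaults to the kernel size, halving each spatial dim.
y = F.average_pooling(x, (2, 2))
print(y.shape)  # -> (4, 3, 16, 16)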
Example 1: mnist_lenet_feature
def mnist_lenet_feature(image, test=False):
    """
    Construct LeNet for MNIST.
    """
    c1 = F.elu(PF.convolution(image, 20, (5, 5), name='conv1'))
    c1 = F.average_pooling(c1, (2, 2))
    c2 = F.elu(PF.convolution(c1, 50, (5, 5), name='conv2'))
    c2 = F.average_pooling(c2, (2, 2))
    c3 = F.elu(PF.affine(c2, 500, name='fc3'))
    c4 = PF.affine(c3, 10, name='fc4')
    c5 = PF.affine(c4, 2, name='fc_embed')
    return c5
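A hypothetical usage sketch for the feature extractor above, assuming the common imports shown earlier and a standard 1x28x28 MNIST input; the batch size is arbitrary.

image = nn.Variable((64, 1, 28, 28))
feature = mnist_lenet_feature(image, test=True)
# Two 5x5 convolutions and two 2x2 average poolings reduce 28x28 to 4x4 before the
# affine layers, ending in a 2-dimensional embedding per image.
print(feature.shape)  # -> (64, 2)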
Example 2: cnn_model_003
def cnn_model_003(ctx, x, act=F.elu, do=True, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)
        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)
        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        h_branch = h
        # Convblock 3
        h = conv_unit(h_branch, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        # Uncertainty
        u0 = conv_unit(h_branch, "u0", 10, k=1, s=1, p=0, act=act, test=test)
        u0 = F.average_pooling(u0, (6, 6))
        with nn.parameter_scope("u0bn"):
            u0 = PF.batch_normalization(u0, batch_stat=not test)
        log_var = F.reshape(u0, (u0.shape[0], np.prod(u0.shape[1:])))
        # Uncertainty for uncertainty
        u1 = conv_unit(h_branch, "u1", 10, k=1, s=1, p=0, act=act, test=test)
        u1 = F.average_pooling(u1, (6, 6))
        with nn.parameter_scope("u1bn"):
            u1 = PF.batch_normalization(u1, batch_stat=not test)
        log_s = F.reshape(u1, (u1.shape[0], np.prod(u1.shape[1:])))
        return pred, log_var, log_s
Example 3: cnn_model_003
def cnn_model_003(ctx, h, act=F.elu, do=True, test=False):
    with nn.context_scope(ctx):
        if not test:
            b, c, s, s = h.shape
            h = F.image_augmentation(h, (c, s, s),
                                     min_scale=1.0, max_scale=1.5,
                                     angle=0.5, aspect_ratio=1.3, distortion=0.2,
                                     flip_lr=True)
        # Convblock0
        h = conv_unit(h, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)
        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test and do:
            h = F.dropout(h)
        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        u = h
        # Convblock 3
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        # Uncertainty
        u = conv_unit(u, "u0", 10, k=1, s=1, p=0, act=act, test=test)
        u = F.average_pooling(u, (6, 6))
        with nn.parameter_scope("u0bn"):
            u = PF.batch_normalization(u, batch_stat=not test)
        log_var = F.reshape(u, (u.shape[0], np.prod(u.shape[1:])))
        return pred, log_var
Example 4: mnist_binary_weight_lenet_prediction
def mnist_binary_weight_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST (Binary Weight Network version).
    """
    with nn.parameter_scope("conv1"):
        c1 = PF.binary_weight_convolution(image, 16, (5, 5))
        c1 = F.elu(F.average_pooling(c1, (2, 2)))
    with nn.parameter_scope("conv2"):
        c2 = PF.binary_weight_convolution(c1, 16, (5, 5))
        c2 = F.elu(F.average_pooling(c2, (2, 2)))
    with nn.parameter_scope("fc3"):
        c3 = F.elu(PF.binary_weight_affine(c2, 50))
    with nn.parameter_scope("fc4"):
        c4 = PF.binary_weight_affine(c3, 10)
    return c4
Example 5: cnn_model_003
def cnn_model_003(ctx, x, act=F.relu, test=False):
    with nn.context_scope(ctx):
        # Convblock0
        h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 32 -> 16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        # Convblock 1
        h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
        h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
        h = F.max_pooling(h, (2, 2))  # 16 -> 8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        # Convblock 2
        h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
        h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
        h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
        # Convblock 3
        h = F.average_pooling(h, (6, 6))
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
        return h
Example 6: resnet_model
def resnet_model(ctx, x, inmaps=64, act=F.relu, test=False):
    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, inmaps, kernel=(3, 3), pad=(1, 1), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        h = res_unit(h, "conv2", act, False)  # -> 32x32
        h = res_unit(h, "conv3", act, True)   # -> 16x16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv4", act, False)  # -> 16x16
        h = res_unit(h, "conv5", act, True)   # -> 8x8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv6", act, False)  # -> 8x8
        h = res_unit(h, "conv7", act, True)   # -> 4x4
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv8", act, False)  # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        pred = PF.affine(h, 10)
        return pred
Example 7: cifar10_resnet23_prediction
def cifar10_resnet23_prediction(ctx, image, test=False):
    """
    Construct ResNet 23
    """
    # Residual Unit
    def res_unit(x, scope_name, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)
            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
            return h

    nmaps = 64
    ncls = 10
    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        h = res_unit(h, "conv2", False)  # -> 32x32
        h = res_unit(h, "conv3", True)   # -> 16x16
        h = bn_dropout(h, "bn_dropout1", test)
        h = res_unit(h, "conv4", False)  # -> 16x16
        h = res_unit(h, "conv5", True)   # -> 8x8
        h = bn_dropout(h, "bn_dropout2", test)
        h = res_unit(h, "conv6", False)  # -> 8x8
        h = res_unit(h, "conv7", True)   # -> 4x4
        h = bn_dropout(h, "bn_dropout3", test)
        h = res_unit(h, "conv8", False)  # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        pred = PF.affine(h, ncls)
    return pred
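A hedged sketch (not part of the original source) of wiring the CIFAR-10 prediction above into a classification loss. It assumes the common imports shown earlier, that the bn_dropout helper used above is defined elsewhere in the source, and that a compute context is obtained via nnabla.ext_utils; all names and shapes are illustrative.

from nnabla.ext_utils import get_extension_context

ctx = get_extension_context('cpu')     # or 'cudnn' when a GPU build is available
image = nn.Variable((32, 3, 32, 32))   # CIFAR-10 batch: 32 RGB 32x32 images
label = nn.Variable((32, 1))           # integer class labels
pred = cifar10_resnet23_prediction(ctx, image, test=False)
loss = F.mean(F.softmax_cross_entropy(pred, label))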
Example 8: mnist_binary_connect_lenet_prediction
def mnist_binary_connect_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST (BinaryNet version).
    """
    with nn.parameter_scope("conv1"):
        c1 = PF.binary_connect_convolution(image, 16, (5, 5))
        c1 = PF.batch_normalization(c1, batch_stat=not test)
        c1 = F.elu(F.average_pooling(c1, (2, 2)))
    with nn.parameter_scope("conv2"):
        c2 = PF.binary_connect_convolution(c1, 16, (5, 5))
        c2 = PF.batch_normalization(c2, batch_stat=not test)
        c2 = F.elu(F.average_pooling(c2, (2, 2)))
    with nn.parameter_scope("fc3"):
        c3 = PF.binary_connect_affine(c2, 50)
        c3 = PF.batch_normalization(c3, batch_stat=not test)
        c3 = F.elu(c3)
    with nn.parameter_scope("fc4"):
        c4 = PF.binary_connect_affine(c3, 10)
        c4 = PF.batch_normalization(c4, batch_stat=not test)
    return c4
Example 9: mnist_resnet_prediction
def mnist_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST.
    """
    image /= 255.0

    def bn(x):
        return PF.batch_normalization(x, batch_stat=not test)

    def res_unit(x, scope):
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
        return F.elu(F.add2(h, x, inplace=True))

    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv6 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = PF.affine(pl, 10)
    return y
Example 10: cnn_model_003_with_cross_attention
def cnn_model_003_with_cross_attention(ctx, x_list, act=F.relu, test=False):
    """With attention before pooling
    """
    with nn.context_scope(ctx):
        # Convblock0
        h0_list = []
        for x in x_list:
            h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
            h0_list.append(h)
        # Cross attention
        ca0 = attention(h0_list[0], h0_list[1], h0_list[1],
                        div_dim=True, softmax=True)
        ca1 = attention(h0_list[1], h0_list[0], h0_list[0],
                        div_dim=True, softmax=True)
        # Max pooling, Batchnorm, Dropout
        h0_list = []
        for h in [ca0, ca1]:
            h = F.max_pooling(h, (2, 2))  # 32 -> 16
            with nn.parameter_scope("bn0"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)
            h0_list.append(h)
        # Convblock 1
        h1_list = []
        for h in h0_list:
            h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
            h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
            h1_list.append(h)
        # Cross attention
        ca0 = attention(h1_list[0], h1_list[1], h1_list[1],
                        div_dim=True, softmax=True)
        ca1 = attention(h1_list[1], h1_list[0], h1_list[0],
                        div_dim=True, softmax=True)
        # Max pooling, Batchnorm, Dropout
        h1_list = []
        for h in [ca0, ca1]:
            h = F.max_pooling(h, (2, 2))  # 16 -> 8
            with nn.parameter_scope("bn1"):
                h = PF.batch_normalization(h, batch_stat=not test)
            if not test:
                h = F.dropout(h)
            h1_list.append(h)
        # Convblock 2
        h2_list = []
        for h in h1_list:
            h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test)  # 8 -> 6
            h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
            h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
            h2_list.append(h)
        # Cross attention
        ca0 = attention(h2_list[0], h2_list[1], h2_list[1],
                        div_dim=True, softmax=True)
        ca1 = attention(h2_list[1], h2_list[0], h2_list[0],
                        div_dim=True, softmax=True)
        # Convblock 3
        h3_list = []
        for h in [ca0, ca1]:
            h = F.average_pooling(h, (6, 6))
            with nn.parameter_scope("bn2"):
                h = PF.batch_normalization(h, batch_stat=not test)
            h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
            h3_list.append(h)
        return h3_list
Example 11: cifar100_resnet23_prediction
from nnabla.initializer import UniformInitializer, calc_uniform_lim_glorot


def cifar100_resnet23_prediction(image, ctx, test=False):
    """
    Construct ResNet 23
    """
    # Residual Unit
    def res_unit(x, scope_name, rng, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C, C // 2, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C // 2, kernel=(3, 3)),
                    rng=rng)
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)
            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
            return h

    # Random generator for using the same init parameters in all devices
    rng = np.random.RandomState(0)
    nmaps = 384
    ncls = 100
    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            # Preprocess
            if not test:
                image = F.image_augmentation(image, contrast=1.0,
                                             angle=0.25,
                                             flip_lr=True)
                image.need_grad = False
            w_init = UniformInitializer(
                calc_uniform_lim_glorot(3, nmaps, kernel=(3, 3)),
                rng=rng)
            h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
                               w_init=w_init, with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)
        h = res_unit(h, "conv2", rng, False)  # -> 32x32
        h = res_unit(h, "conv3", rng, True)   # -> 16x16
        h = res_unit(h, "conv4", rng, False)  # -> 16x16
        h = res_unit(h, "conv5", rng, True)   # -> 8x8
        h = res_unit(h, "conv6", rng, False)  # -> 8x8
        h = res_unit(h, "conv7", rng, True)   # -> 4x4
        h = res_unit(h, "conv8", rng, False)  # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(int(np.prod(h.shape[1:])), ncls, kernel=(1, 1)),
            rng=rng)
        pred = PF.affine(h, ncls, w_init=w_init)
    return pred