This article collects typical usage examples of the nnabla.parameter_scope function in Python. If you are unsure what parameter_scope does, how to call it, or what it looks like in practice, the selected code examples below may help.
The following shows 15 code examples of the parameter_scope function, ordered roughly by popularity.
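The snippets below are excerpts from larger scripts, so they omit their imports and some helper functions (conv_unit and deconv_unit, a bn wrapper presumably around PF.batch_normalization, dummy_parametric_function, and six's iterkeys); conv_unit itself appears as Example 13. A preamble along the following lines is assumed throughout; treat it as a reconstruction rather than part of any single example:

import numpy as np
from six import iterkeys

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF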
Example 1: cnn_model_003
def cnn_model_003(ctx, x, act=F.relu, test=False):
with nn.context_scope(ctx):
# Convblock0
h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 32 -> 16
with nn.parameter_scope("bn0"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test:
h = F.dropout(h)
# Convblock 1
h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 16 -> 8
with nn.parameter_scope("bn1"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test:
h = F.dropout(h)
# Convblock 2
h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test) # 8 -> 6
h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
# Convblock 3
h = F.average_pooling(h, (6, 6))
with nn.parameter_scope("bn2"):
h = PF.batch_normalization(h, batch_stat=not test)
h = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
return h
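As a minimal usage sketch (the input shape, the "cnn003" scope name, and the CPU context are assumptions, and conv_unit is taken to be the helper shown in Example 13), wrapping the call in an outer parameter_scope registers all of the model's parameters under a common prefix:

from nnabla.ext_utils import get_extension_context

ctx = get_extension_context('cpu')           # or 'cudnn' for GPU, if available
x = nn.Variable((8, 3, 32, 32))              # batch of CIFAR-sized images (assumed)
with nn.parameter_scope("cnn003"):
    pred = cnn_model_003(ctx, x, test=False)
# Every parameter created inside the scope is keyed with the "cnn003/" prefix.
print(list(nn.get_parameters().keys())[:3])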
Example 2: resnet_model
def resnet_model(ctx, x, inmaps=64, act=F.relu, test=False):
# Conv -> BN -> Relu
with nn.context_scope(ctx):
with nn.parameter_scope("conv1"):
h = PF.convolution(x, inmaps, kernel=(3, 3), pad=(1, 1), with_bias=False)
h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
h = act(h)
h = res_unit(h, "conv2", act, False) # -> 32x32
h = res_unit(h, "conv3", act, True) # -> 16x16
with nn.parameter_scope("bn0"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test:
h = F.dropout(h)
h = res_unit(h, "conv4", act, False) # -> 16x16
h = res_unit(h, "conv5", act, True) # -> 8x8
with nn.parameter_scope("bn1"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test:
h = F.dropout(h)
h = res_unit(h, "conv6", act, False) # -> 8x8
h = res_unit(h, "conv7", act, True) # -> 4x4
with nn.parameter_scope("bn2"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test:
h = F.dropout(h)
h = res_unit(h, "conv8", act, False) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
pred = PF.affine(h, 10)
return pred
Example 3: res_unit
def res_unit(x, scope_name, dn=False, test=False):
C = x.shape[1]
with nn.parameter_scope(scope_name):
# Conv -> BN -> Relu
with nn.parameter_scope("conv1"):
h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
# Conv -> BN -> Relu
with nn.parameter_scope("conv2"):
h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
# Conv -> BN
with nn.parameter_scope("conv3"):
h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
# Residual -> Relu
h = F.relu(h + x)
# Maxpooling
if dn:
h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
return h
Example 4: cifar10_resnet23_prediction
def cifar10_resnet23_prediction(ctx, scope, image, test=False):
"""
Construct ResNet 23
"""
# Residual Unit
def res_unit(x, scope_name, dn=False, test=False):
C = x.shape[1]
with nn.parameter_scope(scope_name):
# Conv -> BN -> Relu
with nn.parameter_scope("conv1"):
h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
# Conv -> BN -> Relu
with nn.parameter_scope("conv2"):
h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
# Conv -> BN
with nn.parameter_scope("conv3"):
h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
# Residual -> Relu
h = F.relu(h + x)
# Maxpooling
if dn:
h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
return h
# Random generator for using the same init parameters in all devices
nmaps = 64
ncls = 10
# Conv -> BN -> Relu
with nn.context_scope(ctx):
with nn.parameter_scope(scope):
with nn.parameter_scope("conv1"):
h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
with_bias=False)
h = PF.batch_normalization(h, batch_stat=not test)
h = F.relu(h)
h = res_unit(h, "conv2", False) # -> 32x32
h = res_unit(h, "conv3", True) # -> 16x16
h = res_unit(h, "conv4", False) # -> 16x16
h = res_unit(h, "conv5", True) # -> 8x8
h = res_unit(h, "conv6", False) # -> 8x8
h = res_unit(h, "conv7", True) # -> 4x4
h = res_unit(h, "conv8", False) # -> 4x4
h = F.average_pooling(h, kernel=(4, 4)) # -> 1x1
pred = PF.affine(h, ncls)
return pred
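Because the whole network is built inside nn.parameter_scope(scope), calling the function twice with different scope names (for example, one per device) yields two independent parameter sets that can be fetched separately. A sketch, assuming a context ctx and the preamble above (the scope names and input shape are illustrative):

x = nn.Variable((4, 3, 32, 32))
pred_a = cifar10_resnet23_prediction(ctx, "device0", x)
pred_b = cifar10_resnet23_prediction(ctx, "device1", x)

with nn.parameter_scope("device0"):
    params_a = nn.get_parameters()   # only the parameters created under "device0"
with nn.parameter_scope("device1"):
    params_b = nn.get_parameters()   # only the parameters created under "device1"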
Example 5: res_unit
def res_unit(x, scope):
C = x.shape[1]
with nn.parameter_scope(scope):
with nn.parameter_scope('conv1'):
h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
with nn.parameter_scope('conv2'):
h = F.elu(
bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
with nn.parameter_scope('conv3'):
h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
return F.elu(F.add2(h, x, inplace=True))
Example 6: res_unit
def res_unit(x, scope):
C = x.shape[1]
with nn.parameter_scope(scope):
with nn.parameter_scope('conv1'):
h = F.elu(bn(PF.binary_connect_convolution(
x, C // 2, (1, 1), with_bias=False)))
with nn.parameter_scope('conv2'):
h = F.elu(
bn(PF.binary_connect_convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
with nn.parameter_scope('conv3'):
h = bn(PF.binary_connect_convolution(
h, C, (1, 1), with_bias=False))
return F.elu(x + h)
Example 7: cnn_model_003
def cnn_model_003(ctx, x, act=F.elu, do=True, test=False):
with nn.context_scope(ctx):
# Convblock0
h = conv_unit(x, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 32 -> 16
with nn.parameter_scope("bn0"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test and do:
h = F.dropout(h)
# Convblock 1
h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 16 -> 8
with nn.parameter_scope("bn1"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test and do:
h = F.dropout(h)
# Convblock 2
h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test) # 8 -> 6
h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
h_branch = h
# Convblock 3
h = conv_unit(h_branch, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
h = F.average_pooling(h, (6, 6))
with nn.parameter_scope("bn2"):
h = PF.batch_normalization(h, batch_stat=not test)
pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
# Uncertainty
u0 = conv_unit(h_branch, "u0", 10, k=1, s=1, p=0, act=act, test=test)
u0 = F.average_pooling(u0, (6, 6))
with nn.parameter_scope("u0bn"):
u0 = PF.batch_normalization(u0, batch_stat=not test)
log_var = F.reshape(u0, (u0.shape[0], np.prod(u0.shape[1:])))
# Uncertainty for uncertainty
u1 = conv_unit(h_branch, "u1", 10, k=1, s=1, p=0, act=act, test=test)
u1 = F.average_pooling(u1, (6, 6))
with nn.parameter_scope("u1bn"):
u1 = PF.batch_normalization(u1, batch_stat=not test)
log_s = F.reshape(u1, (u1.shape[0], np.prod(u1.shape[1:])))
return pred, log_var, log_s
Example 8: cnn_model_003
def cnn_model_003(ctx, h, act=F.elu, do=True, test=False):
with nn.context_scope(ctx):
if not test:
b, c, s, s = h.shape
h = F.image_augmentation(h, (c, s, s),
min_scale=1.0, max_scale=1.5,
angle=0.5, aspect_ratio=1.3, distortion=0.2,
flip_lr=True)
# Convblock0
h = conv_unit(h, "conv00", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv01", 128, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv02", 128, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 32 -> 16
with nn.parameter_scope("bn0"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test and do:
h = F.dropout(h)
# Convblock 1
h = conv_unit(h, "conv10", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv11", 256, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv12", 256, k=3, s=1, p=1, act=act, test=test)
h = F.max_pooling(h, (2, 2)) # 16 -> 8
with nn.parameter_scope("bn1"):
h = PF.batch_normalization(h, batch_stat=not test)
if not test and do:
h = F.dropout(h)
# Convblock 2
h = conv_unit(h, "conv20", 512, k=3, s=1, p=0, act=act, test=test) # 8 -> 6
h = conv_unit(h, "conv21", 256, k=1, s=1, p=0, act=act, test=test)
h = conv_unit(h, "conv22", 128, k=1, s=1, p=0, act=act, test=test)
u = h
# Convblock 3
h = conv_unit(h, "conv23", 10, k=1, s=1, p=0, act=act, test=test)
h = F.average_pooling(h, (6, 6))
with nn.parameter_scope("bn2"):
h = PF.batch_normalization(h, batch_stat=not test)
pred = F.reshape(h, (h.shape[0], np.prod(h.shape[1:])))
# Uncertainty
u = conv_unit(u, "u0", 10, k=1, s=1, p=0, act=act, test=test)
u = F.average_pooling(u, (6, 6))
with nn.parameter_scope("u0bn"):
u = PF.batch_normalization(u, batch_stat=not test)
log_var = F.reshape(u, (u.shape[0], np.prod(u.shape[1:])))
return pred, log_var
Example 9: mnist_binary_weight_lenet_prediction
def mnist_binary_weight_lenet_prediction(image, test=False):
"""
Construct LeNet for MNIST (Binary Weight Network version).
"""
with nn.parameter_scope("conv1"):
c1 = PF.binary_weight_convolution(image, 16, (5, 5))
c1 = F.elu(F.average_pooling(c1, (2, 2)))
with nn.parameter_scope("conv2"):
c2 = PF.binary_weight_convolution(c1, 16, (5, 5))
c2 = F.elu(F.average_pooling(c2, (2, 2)))
with nn.parameter_scope("fc3"):
c3 = F.elu(PF.binary_weight_affine(c2, 50))
with nn.parameter_scope("fc4"):
c4 = PF.binary_weight_affine(c3, 10)
return c4
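A hedged sketch of wiring this prediction into a training loss (the batch size and variable shapes are assumptions; the parametric functions above register their binarized weights under the conv1/conv2/fc3/fc4 scopes):

image = nn.Variable((64, 1, 28, 28))
label = nn.Variable((64, 1))
pred = mnist_binary_weight_lenet_prediction(image)
loss = F.mean(F.softmax_cross_entropy(pred, label))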
Example 10: cnn_ae_model_000
def cnn_ae_model_000(ctx, x, act=F.relu, test=False):
with nn.parameter_scope("ae"):
with nn.context_scope(ctx):
# Convblock0
h = conv_unit(x, "conv00", 32, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv01", 32, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv02", 32, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv03", 32, k=4, s=2, p=1, act=act, test=test) # 32 -> 16
if not test:
h = F.dropout(h)
# Convblock 1
h = conv_unit(h, "conv10", 64, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv11", 64, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv12", 64, k=3, s=1, p=1, act=act, test=test)
h = conv_unit(h, "conv13", 64, k=4, s=2, p=1, act=act, test=test) # 16 -> 8
if not test:
h = F.dropout(h)
# Deconvblock0
h = deconv_unit(h, "deconv00", 64, k=4, s=2, p=1, act=act, test=test) # 8 -> 16
h = deconv_unit(h, "deconv01", 64, k=3, s=1, p=1, act=act, test=test)
h = deconv_unit(h, "deconv02", 64, k=3, s=1, p=1, act=act, test=test)
h = deconv_unit(h, "deconv03", 64, k=3, s=1, p=1, act=act, test=test)
# Deconvblock 1
h = deconv_unit(h, "deconv10", 32, k=4, s=2, p=1, act=act, test=test) # 16 -> 32
h = deconv_unit(h, "deconv11", 32, k=3, s=1, p=1, act=act, test=test)
h = deconv_unit(h, "deconv12", 32, k=3, s=1, p=1, act=act, test=test)
h = deconv_unit(h, "deconv13", 3, k=3, s=1, p=1, act=None, test=test)
return h
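Since the whole graph is created under the outer "ae" parameter scope, nn.get_parameters() called inside that scope returns only the autoencoder's parameters, which makes it easy to hand them to a dedicated solver. A short sketch (the Adam solver and learning rate are assumptions):

import nnabla.solvers as S

solver = S.Adam(1e-3)
with nn.parameter_scope("ae"):
    solver.set_parameters(nn.get_parameters())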
Example 11: test_parameter_scope_slash
def test_parameter_scope_slash():
"""Testing if parameter_scope('aaa/bbb') works.
"""
import nnabla as nn
from nnabla.parameter import get_parameter_or_create
nn.clear_parameters()
with nn.parameter_scope('aaa/bbb'):
param = get_parameter_or_create('ccc', (2, 3, 4, 5))
ref = np.random.randn(*param.shape).astype(np.float32)
param.d = ref
with nn.parameter_scope('aaa'):
with nn.parameter_scope('bbb'):
param = get_parameter_or_create('ccc', (2, 3, 4, 5))
assert np.all(param.d == ref)
nn.clear_parameters()
Example 12: test_parametric_function_api
def test_parametric_function_api():
"""
Testing :function:`nnabla.parametric_functions.parametric_function_api`.
"""
import nnabla as nn
import inspect
nn.clear_parameters()
shape = (2, 3, 4)
# Signature check
spec = inspect.getfullargspec(dummy_parametric_function)
assert spec.args == ['shape', 'f', 'i', 's', 'name']
assert spec.defaults == (10, 1, 'dummy', None)
assert dummy_parametric_function.__doc__.splitlines()[0] == 'Doc'
# Verify that the two different ways below register the same parameters.
# Using name argument
v = dummy_parametric_function(shape, name='group1')
# Using parameter_scope
with nn.parameter_scope('group1'):
v = dummy_parametric_function(shape)
params = nn.get_parameters()
assert len(params) == 2
assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2']
# No scope
v = dummy_parametric_function(shape)
params = nn.get_parameters()
assert len(params) == 4
assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2',
'dummy/p1', 'dummy/p2']
nn.clear_parameters()
Example 13: conv_unit
def conv_unit(x, scope, maps, k=4, s=2, p=1, act=F.relu, test=False):
with nn.parameter_scope(scope):
h = PF.convolution(x, maps, kernel=(k, k), stride=(s, s), pad=(p, p))
if act is None:
return h
h = PF.batch_normalization(h, batch_stat=not test)
h = act(h)
return h
Example 14: conv_unit
def conv_unit(x, scope, maps, k=4, s=2, p=1, act=F.prelu, test=False):
with nn.parameter_scope(scope):
h = PF.convolution(x, maps, kernel=(k, k), stride=(s, s), pad=(p, p))
h = PF.batch_normalization(h, batch_stat=not test)
shape = h.shape
# Fixed PReLU slope: a plain Variable (not registered under the parameter scope), set to 0.3 below.
w = nn.Variable()
w.d = 0.3
h = act(h, w)
return h
Example 15: res_block
def res_block(x, scope_name, act=F.relu, dn=False, test=False):
C = x.shape[1]
with nn.parameter_scope(scope_name):
# Conv -> BN -> Relu
with nn.parameter_scope("conv1"):
h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0), with_bias=False)
h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
h = act(h)
# Conv -> BN -> Relu
with nn.parameter_scope("conv2"):
h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1), with_bias=False)
h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
h = act(h)
# Conv -> BN
with nn.parameter_scope("conv3"):
h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0), with_bias=False)
h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
return h