This article collects typical usage examples of nn.Conv2D from Python's mxnet.gluon.nn. If you are wondering what exactly nn.Conv2D does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module that defines it, mxnet.gluon.nn.
The following 15 code examples of nn.Conv2D are shown, ordered by popularity by default.
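Before the excerpts, here is a minimal standalone sketch (not taken from any of the examples below) of how nn.Conv2D is typically constructed and called: channels and kernel_size are given up front, while in_channels may be omitted and is then inferred from the first input via Gluon's deferred initialization.

import mxnet as mx
from mxnet.gluon import nn

# 16 output channels, 3x3 kernel; padding=1 keeps the spatial size.
# in_channels is omitted; Gluon infers it (3 here) on the first forward pass.
conv = nn.Conv2D(channels=16, kernel_size=3, padding=1)
conv.initialize()

x = mx.nd.random.uniform(shape=(8, 3, 32, 32))  # NCHW input batch
y = conv(x)
print(y.shape)            # (8, 16, 32, 32)
print(conv.weight.shape)  # (16, 3, 3, 3), known only after the first call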
Example 1: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=InstanceNorm):
    super(Bottleneck, self).__init__()
    self.expansion = 4
    self.downsample = downsample
    if self.downsample is not None:
        # 1x1 projection so the residual branch matches the expanded output
        self.residual_layer = nn.Conv2D(in_channels=inplanes,
                                        channels=planes * self.expansion,
                                        kernel_size=1, strides=(stride, stride))
    self.conv_block = nn.Sequential()
    with self.conv_block.name_scope():
        # pre-activation ordering: norm -> ReLU -> conv
        self.conv_block.add(norm_layer(in_channels=inplanes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=inplanes, channels=planes,
                                      kernel_size=1))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(ConvLayer(planes, planes, kernel_size=3,
                                      stride=stride))
        self.conv_block.add(norm_layer(in_channels=planes))
        self.conv_block.add(nn.Activation('relu'))
        self.conv_block.add(nn.Conv2D(in_channels=planes,
                                      channels=planes * self.expansion,
                                      kernel_size=1))
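For context, the block above follows the pre-activation residual bottleneck pattern: every convolution is preceded by normalization and a ReLU, the first 1x1 convolution narrows the representation to planes channels, the 3x3 ConvLayer (defined elsewhere in the source project, as is InstanceNorm) operates at that reduced width, and the last 1x1 convolution expands back to planes * self.expansion. The 1x1 residual_layer projects the input to that same expanded shape whenever downsample is given, so the skip connection stays shape-compatible.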
Example 2: test_fill_shape_load

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_fill_shape_load():
    ctx = mx.context.current_context()
    net1 = nn.HybridSequential()
    with net1.name_scope():
        net1.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net1.hybridize()
    net1.initialize(ctx=ctx)
    net1(mx.nd.ones((2, 3, 5, 7), ctx))
    net1.save_parameters('net_fill.params')

    net2 = nn.HybridSequential()
    with net2.name_scope():
        net2.add(nn.Conv2D(64, kernel_size=2, padding=1),
                 nn.BatchNorm(),
                 nn.Dense(10))
    net2.hybridize()
    net2.initialize()
    net2.load_parameters('net_fill.params', ctx)

    # Loading fills in the shapes left open by deferred initialization:
    # a (2, 3, 5, 7) input gives a (2, 64, 6, 8) conv output, so the
    # Dense layer sees 64 * 6 * 8 = 3072 flattened features.
    assert net2[0].weight.shape[1] == 3, net2[0].weight.shape[1]
    assert net2[1].gamma.shape[0] == 64, net2[1].gamma.shape[0]
    assert net2[2].weight.shape[1] == 3072, net2[2].weight.shape[1]
Example 3: test_conv2d_16c

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_conv2d_16c():
    chn_list = [16, 256]
    kernel_list = [1, 3, 224]
    batch_size = 4

    class Net(gluon.HybridBlock):
        def __init__(self, chn_num, kernel, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = gluon.nn.Conv2D(chn_num, (kernel, kernel))

        def hybrid_forward(self, F, x):
            out = self.conv0(x)
            return out

    x = mx.nd.random.uniform(-1.0, 1.0, shape=(batch_size, 3, 224, 224))
    for i in range(len(chn_list)):
        for j in range(len(kernel_list)):
            net = Net(chn_list[i], kernel_list[j])
            check_layer_forward_withinput(net, x)
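One detail worth calling out: the sweep includes kernel_size=224 on a 224x224 input, which is valid and yields a 1x1 output map. The usual valid-convolution size rule, sketched below as a hypothetical helper (not part of the original test), confirms all three kernels fit:

# Hypothetical helper, not from the test above: the output size of an
# nn.Conv2D dimension is floor((in + 2*pad - kernel) / stride) + 1.
def conv_out_size(in_size, kernel, pad=0, stride=1):
    return (in_size + 2 * pad - kernel) // stride + 1

assert conv_out_size(224, 1) == 224    # 1x1 kernel preserves spatial size
assert conv_out_size(224, 3) == 222    # 3x3 valid convolution trims a border
assert conv_out_size(224, 224) == 1    # kernel as large as the input -> 1x1 map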
Example 4: test_reshape_conv_reshape_conv

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_reshape_conv_reshape_conv():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(64, (3, 3))
                self.conv1 = nn.Conv2D(128, (3, 3))

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape((0, 0, 128, 32))  # 64*64 == 128*32
            y = self.conv0(x_reshape)
            # spatial shape of y is (126, 30); the next reshape must
            # preserve 126 * 30 = 3780 elements per channel
            y_reshape = y.reshape((0, 0, 63, 60))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 3, 64, 64))
    net = Net()
    check_layer_forward_withinput(net, x)
Example 5: test_slice_conv_reshape_conv

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_slice_conv_reshape_conv():
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(64, (3, 3))
                self.conv1 = nn.Conv2D(128, (3, 3))

        def hybrid_forward(self, F, x):
            x_slice = x.slice(begin=(0, 0, 1, 1), end=(4, 16, 33, 33))
            y = self.conv0(x_slice)
            # shape of y is (4, 64, 30, 30); 30*30 == 60*15
            y_reshape = y.reshape((0, 0, 60, 15))
            out = self.conv1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    net = Net()
    check_layer_forward_withinput(net, x)
Example 6: test_reshape_conv_slice_conv

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_reshape_conv_slice_conv():
    """
    Test gluon Conv2D computation combined with ndarray reshape and slice.
    """
    class Net(gluon.HybridBlock):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(16, (3, 3))
                self.conv1 = nn.Conv2D(32, (3, 3))

        def hybrid_forward(self, F, x):
            x_reshape = x.reshape((0, 0, 64, 16))  # 32*32 == 64*16
            y = self.conv0(x_reshape)
            # shape of y is (4, 16, 62, 14)
            y_slice = y.slice(begin=(0, 0, 0, 0), end=(2, 16, 14, 14))
            out = self.conv1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(4, 3, 32, 32))
    net = Net()
    check_layer_forward_withinput(net, x)
Example 7: test_reshape_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_reshape_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(96, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.reshape = shape

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            # (4, 96, 64, 64) -> (4, 64, 64, 96): BatchNorm (axis=1 by
            # default) now normalizes over a former spatial dimension
            x_reshape = x_in.reshape(self.reshape)
            out = self.bn0(x_reshape)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    shape = (4, 64, 64, -1)
    net = Net(shape)
    check_layer_forward_withinput(net, x)
Example 8: test_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0]),
                                 end=tuple(self.slice[1]))
            out = self.bn0(x_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[0, 0, 0, 0], [4, 32, 32, 32]]
    net = Net(slice)
    check_layer_forward_withinput(net, x)
Example 9: test_slice_batchnorm_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_slice_batchnorm_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0][0]), end=tuple(self.slice[0][1]))
            y = self.bn0(x_slice)
            y_slice = y.slice(begin=tuple(self.slice[1][0]), end=tuple(self.slice[1][1]))
            out = self.bn1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[[0, 0, 0, 0], [4, 32, 32, 32]], [[0, 0, 0, 0], [2, 64, 16, 16]]]
    net = Net(slice)
    check_layer_forward_withinput(net, x)
Example 10: test_slice_batchnorm_reshape_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_slice_batchnorm_reshape_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_slice = x_in.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))
            y = self.bn0(x_slice)
            y_reshape = y.reshape(self.reshape)
            out = self.bn1(y_reshape)
            return out

    x = mx.nd.random.uniform(shape=(16, 128, 256, 256))
    slice = [[0, 0, 0, 0], [4, 32, 32, 32]]
    shape = (1, 128, 64, -1)
    net = Net(shape, slice)
    check_layer_forward_withinput(net, x)
Example 11: test_reshape_batchnorm_slice_batchnorm

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_reshape_batchnorm_slice_batchnorm():
    class Net(gluon.HybridBlock):
        def __init__(self, shape, slice, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.conv0 = nn.Conv2D(128, (1, 1))
                self.bn0 = nn.BatchNorm()
                self.bn1 = nn.BatchNorm()
                self.reshape = shape
                self.slice = slice

        def hybrid_forward(self, F, x):
            x_in = self.conv0(x)
            x_reshape = x_in.reshape(self.reshape)
            y = self.bn0(x_reshape)
            y_slice = y.slice(begin=tuple(self.slice[0]), end=tuple(self.slice[1]))
            out = self.bn1(y_slice)
            return out

    x = mx.nd.random.uniform(shape=(4, 32, 64, 64))
    slice = [[0, 0, 0, 0], [2, 64, 32, 32]]
    shape = (4, 64, 64, -1)
    net = Net(shape, slice)
    check_layer_forward_withinput(net, x)
Example 12: test_mkldnn_engine_threading

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def test_mkldnn_engine_threading():
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
    net.collect_params().initialize(ctx=mx.cpu())

    class Dummy(gluon.data.Dataset):
        def __len__(self):
            return 2

        def __getitem__(self, key):
            return key, np.ones((3, 224, 224)), np.ones((10,))

    loader = gluon.data.DataLoader(Dummy(), batch_size=2, num_workers=1)

    X = (32, 3, 32, 32)
    # trigger mkldnn execution thread
    y = net(mx.nd.array(np.ones(X))).asnumpy()

    # Use the Gluon dataloader to trigger a different execution thread,
    # then check the forward pass still produces the expected result.
    for _ in loader:
        y = net(mx.nd.array(np.ones(X))).asnumpy()
        # output should be 0.016711406 (non-mkldnn mode output)
        assert_almost_equal(y[0, 0, 0, 0], 0.016711406)
        break
Example 13: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def __init__(self):
    super(CellStem0, self).__init__()
    self.conv_1x1 = nn.HybridSequential()
    self.conv_1x1.add(nn.Activation(activation='relu'))
    self.conv_1x1.add(nn.Conv2D(42, 1, strides=1, use_bias=False))
    self.conv_1x1.add(nn.BatchNorm(epsilon=0.001, momentum=0.1))

    # BranchSeparables and BranchSeparablesStem are defined elsewhere
    # in the source project
    self.comb_iter_0_left = BranchSeparables(42, 42, 5, 2, 2)
    self.comb_iter_0_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
    self.comb_iter_1_left = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_1_right = BranchSeparablesStem(96, 42, 7, 2, 3, bias=False)
    self.comb_iter_2_left = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
    self.comb_iter_2_right = BranchSeparablesStem(96, 42, 5, 2, 2, bias=False)
    self.comb_iter_3_right = nn.AvgPool2D(pool_size=3, strides=1, padding=1)
    self.comb_iter_4_left = BranchSeparables(42, 42, 3, 1, 1, bias=False)
    self.comb_iter_4_right = nn.MaxPool2D(pool_size=3, strides=2, padding=1)
Example 14: _make_dense_layer

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def _make_dense_layer(growth_rate, bn_size, dropout):
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())  # Act() is a custom activation from the source project
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    #new_features.add(nn.Activation('relu'))
    new_features.add(Act())
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))

    # concatenate the block input with the new features along the channel axis
    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)

    return out
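To see the dense connectivity this helper implements, here is a small sketch (not from the original source) with nn.Activation('relu') standing in for the project's custom Act(): the returned block concatenates its input with growth_rate new channels, so output channels = input channels + growth_rate.

import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn

def make_dense_layer(growth_rate, bn_size, dropout):
    # mirrors _make_dense_layer above, with a plain ReLU instead of Act()
    new_features = nn.HybridSequential(prefix='')
    new_features.add(nn.BatchNorm())
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
    new_features.add(nn.BatchNorm())
    new_features.add(nn.Activation('relu'))
    new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
    if dropout:
        new_features.add(nn.Dropout(dropout))
    out = gluon.contrib.nn.HybridConcurrent(axis=1, prefix='')
    out.add(gluon.contrib.nn.Identity())
    out.add(new_features)
    return out

layer = make_dense_layer(growth_rate=32, bn_size=4, dropout=0)
layer.initialize()
y = layer(mx.nd.random.uniform(shape=(1, 64, 8, 8)))
assert y.shape == (1, 64 + 32, 8, 8)  # input channels + growth_rate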
Example 15: __init__

# Required import: from mxnet.gluon import nn [as alias]
# Alternatively: from mxnet.gluon.nn import Conv2D [as alias]
def __init__(self, num_init_features, growth_rate, block_config,
             bn_size=4, dropout=0, classes=1000, **kwargs):
    super(DenseNet, self).__init__(**kwargs)
    with self.name_scope():
        # stem: 3x3 conv -> BN -> ReLU -> 3x3 max pool
        self.features = nn.HybridSequential(prefix='')
        self.features.add(nn.Conv2D(num_init_features, kernel_size=3,
                                    strides=1, padding=1, use_bias=False))
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
        # Add dense blocks; each but the last is followed by a transition
        # layer that halves the number of feature channels
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i + 1))
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                self.features.add(_make_transition(num_features // 2))
                num_features = num_features // 2
        self.features.add(nn.BatchNorm())
        self.features.add(nn.Activation('relu'))
        # the pooling/classifier head is left disabled in the source project
        #self.features.add(nn.AvgPool2D(pool_size=7))
        #self.features.add(nn.Flatten())
        #self.output = nn.Dense(classes)