This article collects typical usage examples of the Python method mxnet.gluon.nn.InstanceNorm. If you are unsure what nn.InstanceNorm does or how to use it, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, mxnet.gluon.nn.
The following presents 5 code examples of the nn.InstanceNorm method, sorted by popularity by default.
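Before the examples, here is a minimal, self-contained sketch of the layer's basic call pattern (the shapes and variable names are illustrative, not taken from the examples below):

import mxnet as mx
from mxnet.gluon import nn

# InstanceNorm normalizes each sample independently per channel (axis=1
# by default), unlike BatchNorm, which shares statistics across the batch.
layer = nn.InstanceNorm(in_channels=10)
layer.initialize()
x = mx.nd.random.uniform(shape=(2, 10, 10, 10))  # (batch, channels, H, W)
y = layer(x)
print(y.shape)  # (2, 10, 10, 10): normalization preserves the shape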
Example 1: test_instancenorm
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import InstanceNorm [as alias]
def test_instancenorm():
    layer = nn.InstanceNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))
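check_layer_forward here is a helper from MXNet's own test suite and is not shown on this page. A hedged, minimal stand-in, assuming it only needs to initialize the layer and compare an imperative with a hybridized forward pass, could look like this (the function name is hypothetical):

import mxnet as mx

def check_layer_forward_sketch(layer, shape):
    # Hypothetical simplification of MXNet's test helper: initialize,
    # run an imperative forward pass, then repeat after hybridization.
    layer.initialize()
    x = mx.nd.random.uniform(shape=shape)
    out_imperative = layer(x)
    layer.hybridize()
    out_hybrid = layer(x)
    # The two passes should agree to within numerical tolerance.
    assert mx.nd.abs(out_imperative - out_hybrid).max().asscalar() < 1e-5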
Example 2: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import InstanceNorm [as alias]
def __init__(self, output_nc, ngf=64, use_dropout=False, n_blocks=6, padding_type='reflect'):
    assert(n_blocks >= 0)
    super(ResnetGenerator, self).__init__()
    self.output_nc = output_nc
    self.ngf = ngf
    self.model = nn.HybridSequential()
    with self.name_scope():
        # Initial 7x7 conv with reflection padding keeps the spatial size.
        self.model.add(
            nn.ReflectionPad2D(3),
            nn.Conv2D(ngf, kernel_size=7, padding=0),
            nn.InstanceNorm(),
            nn.Activation('relu')
        )
        # Two stride-2 convs halve the resolution and double the channels.
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2 ** i
            self.model.add(
                nn.Conv2D(ngf * mult * 2, kernel_size=3, strides=2, padding=1),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
        # Residual blocks at the bottleneck resolution.
        mult = 2 ** n_downsampling
        for i in range(n_blocks):
            self.model.add(
                ResnetBlock(ngf * mult, padding_type=padding_type, use_dropout=use_dropout)
            )
        # Transposed convs mirror the downsampling path.
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            self.model.add(
                nn.Conv2DTranspose(int(ngf * mult / 2), kernel_size=3, strides=2,
                                   padding=1, output_padding=1),
                nn.InstanceNorm(),
                nn.Activation('relu')
            )
        # Final 7x7 conv maps back to output_nc channels with a tanh output.
        self.model.add(
            nn.ReflectionPad2D(3),
            nn.Conv2D(output_nc, kernel_size=7, padding=0),
            nn.Activation('tanh')
        )
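Note that the generator creates nn.InstanceNorm() without in_channels: Gluon defers shape inference to the first forward pass. A small self-contained sketch of that deferred-shape pattern (layer sizes are illustrative):

import mxnet as mx
from mxnet.gluon import nn

block = nn.HybridSequential()
block.add(
    nn.ReflectionPad2D(3),
    nn.Conv2D(64, kernel_size=7, padding=0),
    nn.InstanceNorm(),        # in_channels inferred as 64 on first call
    nn.Activation('relu')
)
block.initialize()
y = block(mx.nd.random.uniform(shape=(1, 3, 128, 128)))
print(y.shape)  # (1, 64, 128, 128): the pad of 3 offsets the 7x7 kernel exactly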
Example 3: build_conv_block
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import InstanceNorm [as alias]
def build_conv_block(self, dim, padding_type, use_dropout):
    conv_block = nn.HybridSequential()
    p = 0
    with self.name_scope():
        # First conv: explicit reflection padding or implicit zero padding.
        if padding_type == 'reflect':
            conv_block.add(nn.ReflectionPad2D(1))
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block.add(
            nn.Conv2D(dim, kernel_size=3, padding=p),
            nn.InstanceNorm(),
            nn.Activation('relu')
        )
        if use_dropout:
            conv_block.add(nn.Dropout(0.5))
        # Second conv: same padding logic, but no activation afterwards.
        p = 0
        if padding_type == 'reflect':
            conv_block.add(nn.ReflectionPad2D(1))
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block.add(
            nn.Conv2D(dim, kernel_size=3, padding=p),
            nn.InstanceNorm()
        )
    return conv_block
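Either padding path preserves the spatial size: 'reflect' pads explicitly before a padding=0 convolution, while 'zero' pads inside the convolution itself. A quick self-contained check of that equivalence (channel count and input size are illustrative):

import mxnet as mx
from mxnet.gluon import nn

def make_pad_conv(padding_type, dim=64):
    # Mirrors the padding logic of build_conv_block above.
    block = nn.HybridSequential()
    p = 0
    if padding_type == 'reflect':
        block.add(nn.ReflectionPad2D(1))
    elif padding_type == 'zero':
        p = 1
    block.add(nn.Conv2D(dim, kernel_size=3, padding=p), nn.InstanceNorm())
    return block

x = mx.nd.random.uniform(shape=(1, 64, 32, 32))
for mode in ('reflect', 'zero'):
    blk = make_pad_conv(mode)
    blk.initialize()
    print(mode, blk(x).shape)  # both print (1, 64, 32, 32)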
Example 4: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import InstanceNorm [as alias]
# Also needed by this snippet: import math
def __init__(self,
             channels,
             bn_use_global_stats=False,
             first_fraction=0.5,
             inst_first=True,
             **kwargs):
    super(IBN, self).__init__(**kwargs)
    self.inst_first = inst_first
    # Split the channels into an InstanceNorm part and a BatchNorm part.
    h1_channels = int(math.floor(channels * first_fraction))
    h2_channels = channels - h1_channels
    self.split_sections = [h1_channels, h2_channels]
    if self.inst_first:
        self.inst_norm = nn.InstanceNorm(
            in_channels=h1_channels,
            scale=True)
        self.batch_norm = nn.BatchNorm(
            in_channels=h2_channels,
            use_global_stats=bn_use_global_stats)
    else:
        self.batch_norm = nn.BatchNorm(
            in_channels=h1_channels,
            use_global_stats=bn_use_global_stats)
        self.inst_norm = nn.InstanceNorm(
            in_channels=h2_channels,
            scale=True)
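The snippet shows only the constructor; the forward pass is not included. With the default first_fraction=0.5 and an even channel count the two sections are equal halves, so a hedged sketch of the split-normalize-concat idea might be (the forward logic here is an assumption, not the library's exact code):

from mxnet import nd
from mxnet.gluon import nn

channels = 64
h = channels // 2  # equal halves, matching first_fraction=0.5

inst_norm = nn.InstanceNorm(in_channels=h, scale=True)
batch_norm = nn.BatchNorm(in_channels=h)
inst_norm.initialize()
batch_norm.initialize()

x = nd.random.uniform(shape=(2, channels, 16, 16))
# Assumed forward: split along the channel axis, normalize each half
# differently (IN first, BN second, as in inst_first=True), then re-concat.
x1, x2 = nd.split(x, num_outputs=2, axis=1)
y = nd.concat(inst_norm(x1), batch_norm(x2), dim=1)
print(y.shape)  # (2, 64, 16, 16)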
Example 5: __init__
# Required import: from mxnet.gluon import nn [as alias]
# Or: from mxnet.gluon.nn import InstanceNorm [as alias]
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             strides,
             padding,
             dilation=1,
             groups=1,
             use_bias=False,
             activate=True,
             **kwargs):
    super(IBNbConvBlock, self).__init__(**kwargs)
    self.activate = activate
    with self.name_scope():
        self.conv = nn.Conv2D(
            channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            in_channels=in_channels)
        # InstanceNorm with a learnable scale follows the convolution.
        self.inst_norm = nn.InstanceNorm(
            in_channels=out_channels,
            scale=True)
        if self.activate:
            self.activ = nn.Activation("relu")
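IBNbConvBlock places InstanceNorm with a learnable scale directly after the convolution (the IBN-b placement). Since only the constructor is shown, a self-contained stand-in for its forward behavior is the same conv, InstanceNorm, ReLU pipeline built from plain Gluon blocks (all sizes illustrative):

import mxnet as mx
from mxnet.gluon import nn

block = nn.HybridSequential()
block.add(
    nn.Conv2D(channels=32, kernel_size=3, strides=1, padding=1,
              use_bias=False, in_channels=16),
    nn.InstanceNorm(in_channels=32, scale=True),
    nn.Activation('relu')
)
block.initialize()
y = block(mx.nd.random.uniform(shape=(2, 16, 24, 24)))
print(y.shape)  # (2, 32, 24, 24)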