本文整理匯總了Python中neon.transforms.Rectlin方法的典型用法代碼示例。如果您正苦於以下問題:Python transforms.Rectlin方法的具體用法?Python transforms.Rectlin怎麽用?Python transforms.Rectlin使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類neon.transforms
的用法示例。
在下文中一共展示了transforms.Rectlin方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: conv_params
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def conv_params(fsize, nfm, stride=1, relu=True):
    """Return the keyword arguments for a 2-D Conv layer.

    Arguments:
        fsize (int): square filter side length
        nfm (int): number of output feature maps
        stride (int): convolution stride (default 1)
        relu (bool): attach a Rectlin activation when True, no activation otherwise

    Returns:
        dict: kwargs suitable for ``Conv(**conv_params(...))``
    """
    # 1x1 filters need no padding; larger filters pad by one pixel
    pad = 1 if fsize > 1 else 0
    act = Rectlin() if relu else None
    return {
        'fshape': (fsize, fsize, nfm),
        'strides': stride,
        'padding': pad,
        'activation': act,
        'init': Kaiming(local=True),
        'batch_norm': True,
    }
示例2: module_factory
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def module_factory(nfm, stride=1):
    """Build one residual module: two 3x3 convs summed with a shortcut.

    Arguments:
        nfm (int): number of feature maps for both conv layers
        stride (int): stride of the first conv; a stride != 1 requires a
            projection shortcut instead of an identity skip

    Returns:
        list: [MergeSum of main/side paths, Rectlin activation]
    """
    residual_path = [
        Conv(**conv_params(3, nfm, stride=stride)),
        Conv(**conv_params(3, nfm, relu=False)),
    ]
    # identity skip when shapes match, otherwise a projection conv
    if stride == 1:
        shortcut_path = [SkipNode()]
    else:
        shortcut_path = [Conv(**id_params(nfm))]
    return [MergeSum([residual_path, shortcut_path]), Activation(Rectlin())]
# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths of 16, 32, 64
示例3: module_factory
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def module_factory(nfm, stride=1):
    """Build one network module: two 3x3 convs, optionally wrapped residually.

    When the module-level ``args.network`` is 'plain' the two convs are used
    directly; otherwise they are wrapped in a ResidualModule whose shortcut
    uses an IdentityInit projection only when the stride changes the shape.

    Arguments:
        nfm (int): number of feature maps for both conv layers
        stride (int): stride of the first conv

    Returns:
        list: the conv stack (plain or residual) followed by a Rectlin activation
    """
    projection = IdentityInit() if stride != 1 else None
    conv_stack = [
        Conv(**conv_params(3, nfm, stride=stride)),
        Conv(**conv_params(3, nfm, relu=False)),
    ]
    if args.network == 'plain':
        module = conv_stack
    else:
        module = [ResidualModule(conv_stack, projection)]
    module.append(Activation(Rectlin()))
    return module
# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths of 16, 32, 64
示例4: create_network
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def create_network():
    """Assemble a 3-D convolutional classifier and return it as a Model.

    Three dilated front convs, one pooling stage, a tower of 2x2x2 convs,
    dropout, and a final 2-way softmax affine layer.

    Returns:
        Model: the constructed neon model
    """
    weight_init = Kaiming()
    pad_all = dict(pad_d=1, pad_h=1, pad_w=1)
    stride_all = dict(str_d=2, str_h=2, str_w=2)
    dilate_all = dict(dil_d=2, dil_h=2, dil_w=2)
    conv_common = dict(init=weight_init, batch_norm=True, activation=Rectlin())

    layers = [
        Conv((9, 9, 9, 16), padding=pad_all, strides=stride_all,
             init=weight_init, activation=Rectlin()),
        Conv((5, 5, 5, 32), dilation=dilate_all, **conv_common),
        Conv((3, 3, 3, 64), dilation=dilate_all, **conv_common),
        Pooling((2, 2, 2), padding=pad_all, strides=stride_all),
    ]
    # tower of 2x2x2 convs at increasing then decreasing depth
    for depth in (128, 128, 128, 256, 1024, 4096, 2048, 1024):
        layers.append(Conv((2, 2, 2, depth), **conv_common))
    layers.append(Dropout())
    layers.append(Affine(2, init=Kaiming(local=False), batch_norm=True,
                         activation=Softmax()))
    return Model(layers=layers)
# Parse the command line arguments
示例5: _create_layer
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def _create_layer(self):
    """Build a network consistent with the DeepMind Nature paper.

    Three conv layers (32@8x8/4, 64@4x4/2, 64@3x3/1), each followed by a
    rectifier, then a 512-unit rectified affine layer, then a linear output
    layer with one unit per valid action (``self.output_shape``).

    Returns:
        list: the layer stack, ready to wrap in a Model
    """
    _logger.debug("Output shape = %d" % self.output_shape)
    init_norm = Gaussian(loc=0.0, scale=0.01)
    use_bn = self.batch_norm

    # (fshape, stride) for the three convolutional stages of the Nature DQN
    conv_specs = [((8, 8, 32), 4), ((4, 4, 64), 2), ((3, 3, 64), 1)]
    layers = [
        Conv(fshape,
             strides=stride,
             init=init_norm,
             activation=Rectlin(),
             batch_norm=use_bn)
        for fshape, stride in conv_specs
    ]
    # fully-connected hidden layer of 512 rectifier units
    layers.append(Affine(nout=512,
                         init=init_norm,
                         activation=Rectlin(),
                         batch_norm=use_bn))
    # linear output: one value per valid action
    layers.append(Affine(nout=self.output_shape, init=init_norm))
    return layers
示例6: gen_model
# 需要導入模塊: from neon import transforms [as 別名]
# 或者: from neon.transforms import Rectlin [as 別名]
def gen_model(num_channels, height, width):
    """Build a SegNet-style encoder/decoder segmentation network.

    The encoder is 5 VGG-like stages of 3x3 convs plus 2x2 max pooling; the
    decoder mirrors it with upsampling layers that reuse the argmax indices
    recorded by the matching pooling layers. The final conv emits
    ``num_channels`` maps through a pixelwise softmax.

    Arguments:
        num_channels (int): number of output classes (one channel each)
        height (int): output image height for the pixelwise softmax
        width (int): output image width for the pixelwise softmax

    Returns:
        list: the full layer stack

    Raises:
        AssertionError: if no neon backend has been generated yet
    """
    assert NervanaObject.be is not None, 'need to generate a backend before using this function'
    init_uni = Kaiming()
    # we have 1 issue, they have bias layers we don't allow batchnorm and biases
    conv_common = dict(padding=1, init=init_uni, activation=Rectlin(), batch_norm=True)
    # set up the layers
    layers = []
    # stack of refs to the encoder pooling layers; each upsampling layer pops
    # its partner off this stack to get the argmax indices for unpooling
    pool_layers = []
    # feature-map depths per encoder/decoder stage
    nchan = [64, 128, 256, 512, 512]
    # first loop generates the encoder layers
    for ind in range(len(nchan)):
        nchanu = nchan[ind]
        # first two stages have 2 convs, the rest have 3
        lrng = 2 if ind <= 1 else 3
        for lind in range(lrng):
            nm = 'conv%d_%d' % (ind + 1, lind + 1)
            layers.append(Conv((3, 3, nchanu), strides=1, name=nm, **conv_common))
        layers.append(Pooling(2, strides=2, name='conv%d_pool' % ind))
        pool_layers.append(layers[-1])
        if ind >= 2:
            layers.append(Dropout(keep=0.5, name='drop%d' % (ind + 1)))
    # this loop generates the decoder layers
    for ind in range(len(nchan) - 1, -1, -1):
        nchanu = nchan[ind]
        lrng = 2 if ind <= 1 else 3
        # upsampling layers need a ref to the corresponding pooling layer
        # to access the argmax indices for upsampling
        layers.append(Upsampling(2, pool_layers.pop(), strides=2, padding=0,
                                 name='conv%d_unpool' % ind))
        for lind in range(lrng):
            nm = 'deconv%d_%d' % (ind + 1, lind + 1)
            if ind < 4 and lind == lrng - 1:
                # last conv of each decoder stage halves the depth to feed the
                # next (shallower) stage. Fix: use floor division — plain `/`
                # yields a float under Python 3, which is not a valid channel
                # count for the Conv fshape.
                nchanu = nchan[ind] // 2
            layers.append(Conv((3, 3, nchanu), strides=1, name=nm, **conv_common))
        if ind == 0:
            break
        if ind >= 2:
            # NOTE(review): reuses the encoder's 'drop%d' names — confirm neon
            # tolerates duplicate layer names before renaming anything
            layers.append(Dropout(keep=0.5, name='drop%d' % (ind + 1)))
    # last conv layer outputs num_channels channels, 1 for each output class,
    # with a pixelwise softmax over the channels
    act_last = PixelwiseSoftmax(num_channels, height, width, name="PixelwiseSoftmax")
    conv_last = dict(padding=1, init=init_uni, activation=act_last, batch_norm=False)
    layers.append(Conv((3, 3, num_channels), strides=1, name='deconv_out', **conv_last))
    return layers