本文整理匯總了Python中neon.layers.Conv方法的典型用法代碼示例。如果您正苦於以下問題:Python layers.Conv方法的具體用法?Python layers.Conv怎麽用?Python layers.Conv使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊neon.layers的用法示例。
在下文中一共展示了layers.Conv方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: main_branch
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def main_branch(branch_nodes):
    """Assemble the GoogLeNet main trunk as a flat layer list.

    branch_nodes[0] and branch_nodes[1] are the two tap points where the
    auxiliary classifier branches attach (after inception 4a and 4d).
    """
    stem = [
        Conv((7, 7, 64), padding=3, strides=2, **common),
        Pooling(**pool3s2p1),
        Conv((1, 1, 64), **common),
        Conv((3, 3, 192), **commonp1),
        Pooling(**pool3s2p1),
    ]
    stage3 = [
        inception([(64,), (96, 128), (16, 32), (32,)]),
        inception([(128,), (128, 192), (32, 96), (64,)]),
        Pooling(**pool3s2p1),
    ]
    stage4 = [
        inception([(192,), (96, 208), (16, 48), (64,)]),
        branch_nodes[0],
        inception([(160,), (112, 224), (24, 64), (64,)]),
        inception([(128,), (128, 256), (24, 64), (64,)]),
        inception([(112,), (144, 288), (32, 64), (64,)]),
        branch_nodes[1],
        inception([(256,), (160, 320), (32, 128), (128,)]),
        Pooling(**pool3s2p1),
    ]
    head = [
        inception([(256,), (160, 320), (32, 128), (128,)]),
        inception([(384,), (192, 384), (48, 128), (128,)]),
        Pooling(fshape=7, strides=1, op="avg"),
        Affine(nout=1000, init=init1, activation=Softmax(), bias=Constant(0)),
    ]
    return stem + stage3 + stage4 + head
示例2: inception
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def inception(kvals):
    """Build one GoogLeNet inception module.

    kvals is a 4-tuple of per-branch filter counts:
    (1x1,), (3x3 reduce, 3x3), (5x5 reduce, 5x5), (pool projection,).
    The four towers are merged along the channel (depth) axis.
    """
    b1, b2, b3, b4 = kvals
    towers = [
        [Conv((1, 1, b1[0]), **common)],
        [Conv((1, 1, b2[0]), **common),
         Conv((3, 3, b2[1]), **commonp1)],
        [Conv((1, 1, b3[0]), **common),
         Conv((5, 5, b3[1]), **commonp2)],
        [Pooling(op="max", **pool3s1p1),
         Conv((1, 1, b4[0]), **common)],
    ]
    return MergeBroadcast(layers=towers, merge="depth")
示例3: aux_branch
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def aux_branch(bnode):
    """Auxiliary classifier head hung off *bnode* (a GoogLeNet side tap)."""
    head = [bnode]
    head.append(Pooling(fshape=5, strides=3, op="avg"))
    head.append(Conv((1, 1, 128), **common))
    head.append(Affine(nout=1024, init=init1, activation=relu, bias=bias))
    head.append(Dropout(keep=0.3))
    head.append(Affine(nout=1000, init=init1, activation=Softmax(), bias=Constant(0)))
    return head
# Now construct the model
示例4: module_factory
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def module_factory(nfm, stride=1):
    """Residual module: two 3x3 convs at depth *nfm*, summed with a skip path.

    A stride of 1 uses an identity skip (SkipNode); a strided module needs a
    projection conv so the shapes of the two paths match.
    """
    main_path = [Conv(**conv_params(3, nfm, stride=stride)),
                 Conv(**conv_params(3, nfm, relu=False))]
    if stride == 1:
        skip_path = [SkipNode()]
    else:
        skip_path = [Conv(**id_params(nfm))]
    return [MergeSum([main_path, skip_path]), Activation(Rectlin())]
# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths of 16, 32, 64
示例5: module_factory
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def module_factory(nfm, stride=1):
    """Two 3x3 convs at depth *nfm*, wrapped in a ResidualModule unless a
    plain (non-residual) network was requested via args.network."""
    # strided modules need a projection shortcut; identity suffices otherwise
    shortcut = None if stride == 1 else IdentityInit()
    conv_pair = [Conv(**conv_params(3, nfm, stride=stride)),
                 Conv(**conv_params(3, nfm, relu=False))]
    if args.network == 'plain':
        block = conv_pair
    else:
        block = [ResidualModule(conv_pair, shortcut)]
    block.append(Activation(Rectlin()))
    return block
# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths of 16, 32, 64
示例6: gen_model
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def gen_model(num_channels, height, width):
    """Build a SegNet-style encoder/decoder as a flat list of neon layers.

    Parameters
    ----------
    num_channels : int
        Number of output classes; channel count of the final conv layer.
    height, width : int
        Spatial size of the input, needed by the pixelwise softmax.

    Returns
    -------
    list
        Layer objects: VGG-like encoder, then the mirrored decoder, ending
        in a num_channels-wide conv with a pixelwise softmax activation.
    """
    assert NervanaObject.be is not None, 'need to generate a backend before using this function'
    init_uni = Kaiming()
    # we have 1 issue, they have bias layers we don't allow batchnorm and biases
    conv_common = dict(padding=1, init=init_uni, activation=Rectlin(), batch_norm=True)
    layers = []
    # Stack of encoder pooling layers: each decoder Upsampling pops its
    # mirror to recover the argmax indices for unpooling.
    pool_layers = []
    nchan = [64, 128, 256, 512, 512]
    # Encoder: VGG-16-style stages (2 convs in the first two, 3 after).
    for ind in range(len(nchan)):
        nchanu = nchan[ind]
        lrng = 2 if ind <= 1 else 3
        for lind in range(lrng):
            nm = 'conv%d_%d' % (ind + 1, lind + 1)
            layers.append(Conv((3, 3, nchanu), strides=1, name=nm, **conv_common))
        # NOTE(review): pool names are 0-based ('conv0_pool') while conv names
        # are 1-based -- preserved from the original; confirm intentional.
        layers.append(Pooling(2, strides=2, name='conv%d_pool' % ind))
        pool_layers.append(layers[-1])
        if ind >= 2:
            layers.append(Dropout(keep=0.5, name='drop%d' % (ind + 1)))
    # Decoder: mirror of the encoder, walked deepest stage first.
    for ind in range(len(nchan) - 1, -1, -1):
        nchanu = nchan[ind]
        lrng = 2 if ind <= 1 else 3
        layers.append(Upsampling(2, pool_layers.pop(), strides=2, padding=0,
                                 name='conv%d_unpool' % ind))
        for lind in range(lrng):
            nm = 'deconv%d_%d' % (ind + 1, lind + 1)
            if ind < 4 and lind == lrng - 1:
                # BUG FIX: use floor division -- on Python 3, nchan[ind] / 2
                # yields a float (e.g. 256.0), which is not a valid filter
                # count for Conv's fshape.
                nchanu = nchan[ind] // 2
            layers.append(Conv((3, 3, nchanu), strides=1, name=nm, **conv_common))
        if ind == 0:
            break
        if ind >= 2:
            layers.append(Dropout(keep=0.5, name='drop%d' % (ind + 1)))
    # Last conv outputs num_channels maps, one per class, with a pixelwise
    # softmax over the channel dimension.
    act_last = PixelwiseSoftmax(num_channels, height, width, name="PixelwiseSoftmax")
    conv_last = dict(padding=1, init=init_uni, activation=act_last, batch_norm=False)
    layers.append(Conv((3, 3, num_channels), strides=1, name='deconv_out', **conv_last))
    return layers
示例7: main
# 需要導入模塊: from neon import layers [as 別名]
# 或者: from neon.layers import Conv [as 別名]
def main():
    """Entry point: parse CLI args, build the P1B3 regression net, train it."""
    args = get_parser().parse_args()
    print('Args:', args)

    # The verbose flag switches on debug-level logging.
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None

    if args.convolution and args.convolution[0]:
        reshape = (1, loader.input_dim, 1)
        # args.convolution is a flat list of (nb_filter, filter_len, stride)
        # triples, one per conv layer.
        for spec_start in range(0, len(args.convolution), 3):
            nb_filter = args.convolution[spec_start]
            filter_len = args.convolution[spec_start + 1]
            stride = args.convolution[spec_start + 2]
            # fshape: (height, width, num_filters).
            layers.append(Conv((1, filter_len, nb_filter),
                               strides={'str_h': 1, 'str_w': stride},
                               init=initializer, activation=activation))
        if args.pool:
            layers.append(Pooling((1, args.pool)))

    for layer in args.dense:
        if layer:
            layers.append(Affine(nout=layer, init=initializer, activation=activation))
        if args.drop:
            layers.append(Dropout(keep=(1 - args.drop)))
    # Single linear output unit for the regression target.
    layers.append(Affine(nout=1, init=initializer, activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples,
                                lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples,
                              lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs,
              cost=cost, callbacks=callbacks)