This article collects typical usage examples of the Python method neon.initializers.Gaussian. If you have been wondering what exactly initializers.Gaussian does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from neon.initializers, the module where this method lives.
Below, 3 code examples of initializers.Gaussian are shown, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
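Before the examples, here is a minimal sketch of what Gaussian itself does: it fills a parameter tensor with draws from a normal distribution with mean loc and standard deviation scale (the neon defaults are loc=0.0, scale=1.0). This snippet is not taken from the projects below; the backend configuration and buffer shape are illustrative assumptions.

from neon.backends import gen_backend
from neon.initializers import Gaussian

be = gen_backend(backend='cpu', batch_size=32)  # assumed CPU backend and batch size
init = Gaussian(loc=0.0, scale=0.01)            # mean 0.0, standard deviation 0.01
w = be.empty((64, 128))                         # an uninitialized weight buffer
init.fill(w)                                    # fill w with draws from N(0.0, 0.01**2)
print(w.get().std())                            # should print a value close to 0.01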
Example 1: create_model
# Required import: from neon import initializers [as alias]
# Or alternatively: from neon.initializers import Gaussian [as alias]
def create_model(args, hyper_params):
    # setup layers
    imagenet_layers = [
        Conv((11, 11, 64), init=Gaussian(scale=0.01), bias=Constant(0), activation=Rectlin(),
             padding=3, strides=4),
        Pooling(3, strides=2),
        Conv((5, 5, 192), init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin(),
             padding=2),
        Pooling(3, strides=2),
        Conv((3, 3, 384), init=Gaussian(scale=0.03), bias=Constant(0), activation=Rectlin(),
             padding=1),
        Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1), activation=Rectlin(),
             padding=1),
        Conv((3, 3, 256), init=Gaussian(scale=0.03), bias=Constant(1), activation=Rectlin(),
             padding=1),
        Pooling(3, strides=2),
        Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1), activation=Rectlin()),
        # The following layers are used in AlexNet, but are not used in the new model
        Dropout(keep=0.5),
        # Affine(nout=1000, init=Gaussian(scale=0.01), bias=Constant(-7), activation=Softmax())
    ]
    target_layers = imagenet_layers + [
        Affine(nout=4096, init=Gaussian(scale=0.005), bias=Constant(0.1), activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=21, init=Gaussian(scale=0.01), bias=Constant(0), activation=Softmax())]

    # setup optimizer
    opt = GradientDescentMomentum(hyper_params.learning_rate_scale,
                                  hyper_params.momentum, wdecay=0.0005,
                                  schedule=hyper_params.learning_rate_sched)

    # setup model
    if args.model_file:
        model = Model(layers=args.model_file)
    else:
        model = Model(layers=target_layers)

    return model, opt
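A note on the design of this example: imagenet_layers form an AlexNet-style backbone, while the three appended target_layers are a fresh head ending in a 21-way Softmax for the new task. The small Gaussian scales (0.005 to 0.03) keep the randomly initialized weights close to zero, and when args.model_file is given the whole model is instead deserialized from that file.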
Example 2: _create_layer
# Required import: from neon import initializers [as alias]
# Or alternatively: from neon.initializers import Gaussian [as alias]
def _create_layer(self):
    """ Build a network consistent with the DeepMind Nature paper. """
    _logger.debug("Output shape = %d" % self.output_shape)
    # create network
    init_norm = Gaussian(loc=0.0, scale=0.01)
    layers = []
    # The first hidden layer convolves 32 filters of 8x8 with stride 4 with the
    # input image and applies a rectifier nonlinearity.
    layers.append(
        Conv((8, 8, 32),
             strides=4,
             init=init_norm,
             activation=Rectlin(),
             batch_norm=self.batch_norm))
    # The second hidden layer convolves 64 filters of 4x4 with stride 2, again
    # followed by a rectifier nonlinearity.
    layers.append(
        Conv((4, 4, 64),
             strides=2,
             init=init_norm,
             activation=Rectlin(),
             batch_norm=self.batch_norm))
    # This is followed by a third convolutional layer that convolves 64 filters
    # of 3x3 with stride 1 followed by a rectifier.
    layers.append(
        Conv((3, 3, 64),
             strides=1,
             init=init_norm,
             activation=Rectlin(),
             batch_norm=self.batch_norm))
    # The final hidden layer is fully-connected and consists of 512 rectifier units.
    layers.append(
        Affine(
            nout=512,
            init=init_norm,
            activation=Rectlin(),
            batch_norm=self.batch_norm))
    # The output layer is a fully-connected linear layer with a single output
    # for each valid action.
    layers.append(
        Affine(
            nout=self.output_shape,
            init=init_norm))
    return layers
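For context, a layer list like the one returned by _create_layer would typically be wrapped in a neon Model and paired with a cost before training. The sketch below is not part of the original class: the backend settings, the action count, the SumSquared cost, and the abbreviated architecture (one conv layer plus the two Affine layers) are all illustrative assumptions.

from neon.backends import gen_backend
from neon.initializers import Gaussian
from neon.layers import Affine, Conv, GeneralizedCost
from neon.models import Model
from neon.transforms import Rectlin, SumSquared

be = gen_backend(backend='cpu', batch_size=32)  # assumed backend and batch size
init_norm = Gaussian(loc=0.0, scale=0.01)
num_actions = 4                                 # hypothetical number of valid actions
layers = [
    Conv((8, 8, 32), strides=4, init=init_norm, activation=Rectlin()),
    Affine(nout=512, init=init_norm, activation=Rectlin()),
    Affine(nout=num_actions, init=init_norm),   # linear output: one Q-value per action
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=SumSquared())   # squared TD error, a common DQN choice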
Example 3: main
# Required import: from neon import initializers [as alias]
# Or alternatively: from neon.initializers import Gaussian [as alias]
def main():
    parser = get_parser()
    args = parser.parse_args()
    print('Args:', args)

    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    # initializer = Gaussian(loc=0.0, scale=0.01)
    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None

    if args.convolution and args.convolution[0]:
        reshape = (1, loader.input_dim, 1)
        layer_list = list(range(0, len(args.convolution), 3))
        for l, i in enumerate(layer_list):
            nb_filter = args.convolution[i]
            filter_len = args.convolution[i + 1]
            stride = args.convolution[i + 2]
            # print(nb_filter, filter_len, stride)
            # fshape: (height, width, num_filters).
            layers.append(Conv((1, filter_len, nb_filter),
                               strides={'str_h': 1, 'str_w': stride},
                               init=initializer, activation=activation))
            if args.pool:
                layers.append(Pooling((1, args.pool)))

    for layer in args.dense:
        if layer:
            layers.append(Affine(nout=layer, init=initializer, activation=activation))
            if args.drop:
                layers.append(Dropout(keep=(1 - args.drop)))
    layers.append(Affine(nout=1, init=initializer, activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples, lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples, lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
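Note that this example ultimately opts for GlorotUniform over the commented-out Gaussian(loc=0.0, scale=0.01) initializer. The project helper get_function appears to resolve string arguments such as args.activation, args.loss, and args.optimizer to the corresponding neon classes, which are then instantiated with ().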