本文整理汇总了Python中neon.models.Model方法的典型用法代码示例。如果您正苦于以下问题：Python models.Model方法的具体用法？Python models.Model怎么用？Python models.Model使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类neon.models的用法示例。
在下文中一共展示了models.Model方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_model
# 需要导入模块: from neon import models [as 别名]
# 或者: from neon.models import Model [as 别名]
def create_model(args, hyper_params):
    """Assemble the fine-tuning network and its SGD optimizer.

    The convolutional trunk follows AlexNet (its original 1000-way
    classifier is left out); three fresh fully-connected layers are
    stacked on top for the 21-class target task.

    Args:
        args: Parsed command-line arguments; ``args.model_file``, when set,
            names a serialized model to load instead of the fresh stack.
        hyper_params: Carries ``learning_rate_scale``, ``momentum`` and
            ``learning_rate_sched``.

    Returns:
        A ``(model, optimizer)`` pair.
    """
    def conv(fshape, scale, bias_val, **extra):
        # Convolution + ReLU with Gaussian-initialized weights.
        return Conv(fshape, init=Gaussian(scale=scale), bias=Constant(bias_val),
                    activation=Rectlin(), **extra)

    # AlexNet-style feature extractor.
    imagenet_layers = [
        conv((11, 11, 64), 0.01, 0, padding=3, strides=4),
        Pooling(3, strides=2),
        conv((5, 5, 192), 0.01, 1, padding=2),
        Pooling(3, strides=2),
        conv((3, 3, 384), 0.03, 0, padding=1),
        conv((3, 3, 256), 0.03, 1, padding=1),
        conv((3, 3, 256), 0.03, 1, padding=1),
        Pooling(3, strides=2),
        Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1),
               activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=4096, init=Gaussian(scale=0.01), bias=Constant(1),
               activation=Rectlin()),
        # The following layers are used in Alexnet, but are not used in the
        # new model:
        Dropout(keep=0.5),
        # Affine(nout=1000, init=Gaussian(scale=0.01), bias=Constant(-7),
        #        activation=Softmax())
    ]

    # Task-specific head: one extra hidden layer plus a 21-way softmax.
    target_layers = imagenet_layers + [
        Affine(nout=4096, init=Gaussian(scale=0.005), bias=Constant(.1),
               activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=21, init=Gaussian(scale=0.01), bias=Constant(0),
               activation=Softmax()),
    ]

    # SGD with momentum, weight decay and a user-supplied LR schedule.
    opt = GradientDescentMomentum(hyper_params.learning_rate_scale,
                                  hyper_params.momentum, wdecay=0.0005,
                                  schedule=hyper_params.learning_rate_sched)

    # Either resume from a serialized model file or start from scratch.
    if args.model_file:
        model = Model(layers=args.model_file)
    else:
        model = Model(layers=target_layers)
    return model, opt
示例2: create_network
# 需要导入模块: from neon import models [as 别名]
# 或者: from neon.models import Model [as 别名]
def create_network():
    """Build the 3-D convolutional binary classifier.

    Returns:
        A freshly constructed ``Model`` (not yet initialized).
    """
    init = Kaiming()
    pad1 = dict(pad_d=1, pad_h=1, pad_w=1)
    str2 = dict(str_d=2, str_h=2, str_w=2)
    dil2 = dict(dil_d=2, dil_h=2, dil_w=2)
    # Shared settings for the batch-normalized ReLU convolutions.
    bn_relu = dict(init=init, batch_norm=True, activation=Rectlin())

    layers = [
        # Stem: large strided receptive field, no batch norm.
        Conv((9, 9, 9, 16), padding=pad1, strides=str2, init=init,
             activation=Rectlin()),
        Conv((5, 5, 5, 32), dilation=dil2, **bn_relu),
        Conv((3, 3, 3, 64), dilation=dil2, **bn_relu),
        Pooling((2, 2, 2), padding=pad1, strides=str2),
    ]
    # Stack of 2x2x2 batch-normalized convolutions of increasing then
    # decreasing width.
    for width in (128, 128, 128, 256, 1024, 4096, 2048, 1024):
        layers.append(Conv((2, 2, 2, width), **bn_relu))
    layers.append(Dropout())
    # Two-way softmax head; Kaiming(local=False) for the fully-connected init.
    layers.append(Affine(2, init=Kaiming(local=False), batch_norm=True,
                         activation=Softmax()))
    return Model(layers=layers)
# Parse the command line arguments
示例3: __init__
# 需要导入模块: from neon import models [as 别名]
# 或者: from neon.models import Model [as 别名]
def __init__(self, env, args, rng, name = "DQNNeon"):
    """ Initializes a network based on the Neon framework.

    Args:
        env (AtariEnv): The environment in which the agent actuates.
        args (argparse.Namespace): All settings either with a default value or set via command line arguments.
        rng (mtrand.RandomState): initialized Mersenne Twister pseudo-random number generator.
        name (str): The name of the network object.

    Note:
        This function should always call the base class first to initialize
        the common values for the networks.
    """
    _logger.info("Initializing new object of type " + str(type(self).__name__))
    super(DQNNeon, self).__init__(env, args, rng, name)
    # Frames are stacked along the first axis; the batch dimension is last
    # (Neon's tensor convention).
    self.input_shape = (self.sequence_length,) + self.frame_dims + (self.batch_size,)
    self.dummy_batch = np.zeros((self.batch_size, self.sequence_length) + self.frame_dims, dtype=np.uint8)
    self.batch_norm = args.batch_norm
    # Generate the Neon compute backend (CPU/GPU selected via args).
    self.be = gen_backend(
            backend = args.backend,
            batch_size = args.batch_size,
            rng_seed = args.random_seed,
            device_id = args.device_id,
            datatype = np.dtype(args.datatype).type,
            stochastic_round = args.stochastic_round)
    # Prepare tensors once and reuse them on every step.
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape # HACK: needed for convolutional networks
    self.targets = self.be.empty((self.output_shape, self.batch_size))
    # Create the online model.
    layers = self._create_layer()
    self.model = Model(layers = layers)
    self.cost_func = GeneralizedCost(costfunc = SumSquared())
    # Bug fix: disable layer parallelism (workaround for a Neon issue).
    for l in self.model.layers.layers:
        l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], self.cost_func)
    self._set_optimizer()
    # FIX: identity comparison instead of `not ... == None`.
    if self.args.load_weights is not None:
        self.load_weights(self.args.load_weights)
    # Create the separate target model when periodic target updates are
    # enabled; otherwise the target network aliases the online model.
    if self.target_update_frequency:
        layers = self._create_layer()
        self.target_model = Model(layers)
        # Bug fix (same Neon parallelism workaround as above).
        for l in self.target_model.layers.layers:
            l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])
    else:
        self.target_model = self.model
    self.callback = None
    _logger.debug("%s" % self)
示例4: main
# 需要导入模块: from neon import models [as 别名]
# 或者: from neon.models import Model [as 别名]
def main():
    """Entry point: parse arguments, build the data loaders and the neon
    network, then run training with validation callbacks."""
    args = get_parser().parse_args()
    print('Args:', args)

    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    # initializer = Gaussian(loc=0.0, scale=0.01)
    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None
    if args.convolution and args.convolution[0]:
        # Present the flat feature vector as a 1 x input_dim "image".
        reshape = (1, loader.input_dim, 1)
        # args.convolution is a flat list of
        # (nb_filter, filter_len, stride) triples.
        for i in range(0, len(args.convolution), 3):
            nb_filter, filter_len, stride = args.convolution[i:i + 3]
            # fshape: (height, width, num_filters).
            layers.append(Conv((1, filter_len, nb_filter),
                               strides={'str_h': 1, 'str_w': stride},
                               init=initializer, activation=activation))
        if args.pool:
            layers.append(Pooling((1, args.pool)))

    for nout in args.dense:
        if nout:
            layers.append(Affine(nout=nout, init=initializer,
                                 activation=activation))
            if args.drop:
                layers.append(Dropout(keep=(1 - args.drop)))
    # Single linear output unit for the regression target.
    layers.append(Affine(nout=1, init=initializer,
                         activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples,
                                lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples,
                              lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)
    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs,
              cost=cost, callbacks=callbacks)