This page collects typical usage examples of the Python method neon.layers.GeneralizedCost. If you have been wondering what layers.GeneralizedCost does, how to use it, or what it looks like in practice, the curated examples below may help. You can also browse the other members of the neon.layers module.
Two code examples of layers.GeneralizedCost are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
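Before the full examples, a minimal, self-contained sketch of the usual pattern may help: GeneralizedCost wraps one of neon's cost functions (SumSquared here) and is passed to Model.fit together with an optimizer. The toy data, layer sizes, and backend settings below are illustrative assumptions, not taken from the examples on this page.

import numpy as np
from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared

be = gen_backend(backend='cpu', batch_size=32)

# toy regression data: 128 samples, 10 features, 1 target (assumed shapes)
X = np.random.rand(128, 10)
y = np.random.rand(128, 1)
train = ArrayIterator(X, y, make_onehot=False)

model = Model(layers=[
    Affine(nout=64, init=Gaussian(scale=0.01), activation=Rectlin()),
    Affine(nout=1, init=Gaussian(scale=0.01))])
cost = GeneralizedCost(costfunc=SumSquared())  # the method documented on this page
optimizer = GradientDescentMomentum(learning_rate=0.01, momentum_coef=0.9)
model.fit(train, optimizer=optimizer, num_epochs=2, cost=cost,
          callbacks=Callbacks(model))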
Example 1: __init__
# Required import: from neon import layers [as alias]
# Or: from neon.layers import GeneralizedCost [as alias]
def __init__(self, env, args, rng, name="DQNNeon"):
    """ Initializes a network based on the Neon framework.

    Args:
        env (AtariEnv): The environment in which the agent acts.
        args (argparse.Namespace): All settings, either with a default value or set via command line arguments.
        rng (mtrand.RandomState): Initialized Mersenne Twister pseudo-random number generator.
        name (str): The name of the network object.

    Note:
        This function should always call the base class first to initialize
        the common values for the networks.
    """
    _logger.info("Initializing new object of type " + str(type(self).__name__))
    super(DQNNeon, self).__init__(env, args, rng, name)
    self.input_shape = (self.sequence_length,) + self.frame_dims + (self.batch_size,)
    self.dummy_batch = np.zeros((self.batch_size, self.sequence_length) + self.frame_dims, dtype=np.uint8)
    self.batch_norm = args.batch_norm
    self.be = gen_backend(
            backend=args.backend,
            batch_size=args.batch_size,
            rng_seed=args.random_seed,
            device_id=args.device_id,
            datatype=np.dtype(args.datatype).type,
            stochastic_round=args.stochastic_round)
    # prepare tensors once and reuse them
    self.input = self.be.empty(self.input_shape)
    self.input.lshape = self.input_shape  # HACK: needed for convolutional networks
    self.targets = self.be.empty((self.output_shape, self.batch_size))
    # create model
    layers = self._create_layer()
    self.model = Model(layers=layers)
    self.cost_func = GeneralizedCost(costfunc=SumSquared())
    # Bug fix: disable per-layer parallelism to work around a neon issue
    for l in self.model.layers.layers:
        l.parallelism = 'Disabled'
    self.model.initialize(self.input_shape[:-1], self.cost_func)
    self._set_optimizer()
    if self.args.load_weights is not None:
        self.load_weights(self.args.load_weights)
    # create target model
    if self.target_update_frequency:
        layers = self._create_layer()
        self.target_model = Model(layers)
        # Bug fix: same parallelism workaround for the target network
        for l in self.target_model.layers.layers:
            l.parallelism = 'Disabled'
        self.target_model.initialize(self.input_shape[:-1])
    else:
        self.target_model = self.model
    self.callback = None
    _logger.debug("%s" % self)
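The constructor above builds a separate target_model when target_update_frequency is set, but the sync step lies outside this excerpt. A plausible sketch of such an update method, assuming neon's Model.get_description/Model.deserialize API (the method name update_target_network is an assumption, not from the excerpt):

def update_target_network(self):
    # Hypothetical helper (not shown in the excerpt): copy the online model's
    # weights, plus layer states so batch normalization keeps working,
    # into the target network via neon's serialization API.
    pdict = self.model.get_description(get_weights=True, keep_states=True)
    self.target_model.deserialize(pdict, load_states=True)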
Example 2: main
# Required import: from neon import layers [as alias]
# Or: from neon.layers import GeneralizedCost [as alias]
def main():
    parser = get_parser()
    args = parser.parse_args()
    print('Args:', args)

    loggingLevel = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=loggingLevel, format='')

    ext = extension_from_parameters(args)

    loader = p1b3.DataLoader(feature_subsample=args.feature_subsample,
                             scaling=args.scaling,
                             drug_features=args.drug_features,
                             scramble=args.scramble,
                             min_logconc=args.min_logconc,
                             max_logconc=args.max_logconc,
                             subsample=args.subsample,
                             category_cutoffs=args.category_cutoffs)

    # initializer = Gaussian(loc=0.0, scale=0.01)
    initializer = GlorotUniform()
    activation = get_function(args.activation)()

    layers = []
    reshape = None

    # optional convolutional front end: args.convolution holds flat
    # (nb_filter, filter_len, stride) triples
    if args.convolution and args.convolution[0]:
        reshape = (1, loader.input_dim, 1)
        layer_list = list(range(0, len(args.convolution), 3))
        for l, i in enumerate(layer_list):
            nb_filter = args.convolution[i]
            filter_len = args.convolution[i+1]
            stride = args.convolution[i+2]
            # print(nb_filter, filter_len, stride)
            # fshape: (height, width, num_filters)
            layers.append(Conv((1, filter_len, nb_filter), strides={'str_h': 1, 'str_w': stride}, init=initializer, activation=activation))
            if args.pool:
                layers.append(Pooling((1, args.pool)))

    # fully connected stack, with optional dropout after each dense layer
    for layer in args.dense:
        if layer:
            layers.append(Affine(nout=layer, init=initializer, activation=activation))
            if args.drop:
                layers.append(Dropout(keep=(1 - args.drop)))
    layers.append(Affine(nout=1, init=initializer, activation=neon.transforms.Identity()))

    model = Model(layers=layers)

    train_iter = ConcatDataIter(loader, ndata=args.train_samples, lshape=reshape, datatype=args.datatype)
    val_iter = ConcatDataIter(loader, partition='val', ndata=args.val_samples, lshape=reshape, datatype=args.datatype)

    cost = GeneralizedCost(get_function(args.loss)())
    optimizer = get_function(args.optimizer)()
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    model.fit(train_iter, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
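Once model.fit returns, the fitted model can be queried for validation predictions with neon's Model.get_outputs; the lines below are an assumed follow-up, not part of the original script:

# Hypothetical follow-up (assumed, not in the original script): pull the
# validation predictions out as a NumPy array for offline scoring.
preds = model.get_outputs(val_iter)
print('val predictions shape:', preds.shape)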