This article collects typical usage examples of the Python method lasagne.updates.adam. If you are unsure what updates.adam does or how to call it, the curated code examples below may help. You can also explore further usage examples of the containing module, lasagne.updates.
The following presents 8 code examples of the updates.adam method, ordered by popularity by default.
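Before turning to the examples, here is a minimal sketch of the usual calling pattern: adam takes a scalar loss expression (or a list of gradients) plus the network's trainable parameters and returns an updates dictionary that can be passed to theano.function. The tiny network and the names input_var, target_var, l_in, and l_out below are illustrative assumptions, not taken from the examples that follow.

import theano
import theano.tensor as T
import lasagne
from lasagne.updates import adam

# Symbolic inputs and a small illustrative network
input_var = T.matrix('inputs')
target_var = T.matrix('targets')
l_in = lasagne.layers.InputLayer(shape=(None, 10), input_var=input_var)
l_out = lasagne.layers.DenseLayer(l_in, num_units=1, nonlinearity=None)

# Mean squared error loss over the network output
prediction = lasagne.layers.get_output(l_out)
loss = lasagne.objectives.squared_error(prediction, target_var).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)

# adam returns an OrderedDict of parameter updates for theano.function
updates = adam(loss, params, learning_rate=1e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)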
Example 1: model_initial
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def model_initial(X_train, y_train, max_iter=5):
    global params, val_acc
    params = []
    val_acc = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)

# model training
Example 2: model_train
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        # on_training_started=[LoadBestParam(iteration=val_acc.argmax())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_acc.argmax()])
    print('Continuing to train...')
    net.fit(X_train, y_train)
    print('Model training finished.')
    return net

# model testing
Example 3: model_train
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def model_train(X_train, y_train, learning_rate=1e-4, epochs=50):
    network = create_network()
    lr = theano.shared(np.float32(learning_rate))
    net = NeuralNet(
        network,
        max_epochs=epochs,
        update=adam,
        update_learning_rate=lr,
        train_split=TrainSplit(eval_size=0.1),
        batch_iterator_train=BatchIterator(batch_size=32),
        batch_iterator_test=BatchIterator(batch_size=64),
        regression=True,
        objective_loss_function=squared_error,
        # on_training_started=[LoadBestParam(iteration=val_loss.argmin())],
        on_epoch_finished=[EarlyStopping(patience=5)],
        verbose=1)
    print('Loading pre-training weights...')
    net.load_params_from(params[val_loss.argmin()])
    print('Continuing to train...')
    net.fit(X_train, y_train)
    print('Training finished.')
    return net

# model testing
Example 4: net_updates
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def net_updates(net, loss, lr):
    # Get all trainable parameters (weights) of our net
    params = l.get_all_params(net, trainable=True)
    # We use the adam update by default; other optimizers are available
    if cfg.OPTIMIZER == 'adam':
        param_updates = updates.adam(loss, params, learning_rate=lr, beta1=0.9)
    elif cfg.OPTIMIZER == 'nesterov':
        param_updates = updates.nesterov_momentum(loss, params, learning_rate=lr, momentum=0.9)
    elif cfg.OPTIMIZER == 'sgd':
        param_updates = updates.sgd(loss, params, learning_rate=lr)
    return param_updates
#################### TRAIN FUNCTION #####################
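The train function that follows this banner in the source repository is not shown on this page. As a hedged sketch only, the updates dictionary returned by net_updates would typically be compiled into a Theano training function along these lines (net, loss, input_var, and target_var are assumptions standing in for the repository's own objects):

# Sketch only: 'net', 'loss', 'input_var', and 'target_var' are assumed to be
# built elsewhere in the original project (cfg supplies the optimizer choice).
lr = theano.shared(np.float32(0.001))
param_updates = net_updates(net, loss, lr)
train_fn = theano.function([input_var, target_var], loss, updates=param_updates)
# Each call to train_fn(x_batch, y_batch) performs one optimizer step.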
Example 5: model_initial
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def model_initial(X_train, y_train, max_iter=5):
    global params, val_loss
    params = []
    val_loss = np.zeros(max_iter)
    lr = theano.shared(np.float32(1e-4))
    for iteration in range(max_iter):
        print('Initializing weights (%d/%d) ...' % (iteration + 1, max_iter))
        network_init = create_network()
        net_init = NeuralNet(
            network_init,
            max_epochs=3,
            update=adam,
            update_learning_rate=lr,
            train_split=TrainSplit(eval_size=0.1),
            batch_iterator_train=BatchIterator(batch_size=32),
            batch_iterator_test=BatchIterator(batch_size=64),
            regression=True,
            objective_loss_function=squared_error,
            on_training_finished=[SaveTrainHistory(iteration=iteration)],
            verbose=0)
        net_init.initialize()
        net_init.fit(X_train, y_train)

# model training
Example 6: get_update_adam
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def get_update_adam():
    """
    Build an adam update rule.
    """
    def update(all_grads, all_params, learning_rate):
        """Compute adam updates from precomputed gradients."""
        return adam(all_grads, all_params, learning_rate)
    return update
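A short, hypothetical usage sketch for the wrapper above: the returned closure expects precomputed gradients, which in practice would come from theano.grad over the network's trainable parameters (network, loss, input_var, and target_var below are placeholders, not names from the original code):

# Hypothetical usage of get_update_adam(); lasagne.updates.adam accepts a
# list of gradient expressions in place of a scalar loss.
params = lasagne.layers.get_all_params(network, trainable=True)
grads = theano.grad(loss, params)
updates = get_update_adam()(grads, params, learning_rate=1e-4)
train_fn = theano.function([input_var, target_var], loss, updates=updates)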
Example 7: get_estimator
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def get_estimator(n_features, files, labels, eval_size=0.1):
    layers = [
        (InputLayer, {'shape': (None, n_features)}),
        (DenseLayer, {'num_units': N_HIDDEN_1, 'nonlinearity': rectify,
                      'W': init.Orthogonal('relu'),
                      'b': init.Constant(0.01)}),
        (FeaturePoolLayer, {'pool_size': 2}),
        (DenseLayer, {'num_units': N_HIDDEN_2, 'nonlinearity': rectify,
                      'W': init.Orthogonal('relu'),
                      'b': init.Constant(0.01)}),
        (FeaturePoolLayer, {'pool_size': 2}),
        (DenseLayer, {'num_units': 1, 'nonlinearity': None}),
    ]
    args = dict(
        update=adam,
        update_learning_rate=theano.shared(util.float32(START_LR)),
        batch_iterator_train=ResampleIterator(BATCH_SIZE),
        batch_iterator_test=BatchIterator(BATCH_SIZE),
        objective=nn.get_objective(l1=L1, l2=L2),
        eval_size=eval_size,
        custom_score=('kappa', util.kappa) if eval_size > 0.0 else None,
        on_epoch_finished=[
            nn.Schedule('update_learning_rate', SCHEDULE),
        ],
        regression=True,
        max_epochs=N_ITER,
        verbose=1,
    )
    net = BlendNet(layers, **args)
    net.set_split(files, labels)
    return net
Example 8: get_updates
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adam [as alias]
def get_updates(nnet,
                train_obj,
                trainable_params,
                solver=None):

    implemented_solvers = ("sgd", "momentum", "nesterov", "adagrad",
                           "rmsprop", "adadelta", "adam", "adamax")

    if solver not in implemented_solvers:
        nnet.sgd_solver = "adam"
    else:
        nnet.sgd_solver = solver

    if nnet.sgd_solver == "sgd":
        updates = l_updates.sgd(train_obj,
                                trainable_params,
                                learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "momentum":
        updates = l_updates.momentum(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     momentum=Cfg.momentum)
    elif nnet.sgd_solver == "nesterov":
        updates = l_updates.nesterov_momentum(train_obj,
                                              trainable_params,
                                              learning_rate=Cfg.learning_rate,
                                              momentum=Cfg.momentum)
    elif nnet.sgd_solver == "adagrad":
        updates = l_updates.adagrad(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "rmsprop":
        updates = l_updates.rmsprop(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate,
                                    rho=Cfg.rho)
    elif nnet.sgd_solver == "adadelta":
        updates = l_updates.adadelta(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     rho=Cfg.rho)
    elif nnet.sgd_solver == "adam":
        updates = l_updates.adam(train_obj,
                                 trainable_params,
                                 learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "adamax":
        updates = l_updates.adamax(train_obj,
                                   trainable_params,
                                   learning_rate=Cfg.learning_rate)

    return updates
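Finally, a hedged usage sketch for example 8 (nnet, train_obj, trainable_params, and Cfg are defined by the surrounding project; X_var and y_var below only illustrate the calling convention and are assumptions):

# Sketch only: unknown solver names fall back to adam inside get_updates.
updates = get_updates(nnet, train_obj, trainable_params, solver="adam")
train_fn = theano.function([X_var, y_var], train_obj, updates=updates)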