This page collects typical usage examples of the Python method lasagne.updates.adagrad. If you have been wondering what updates.adagrad does, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the containing module, lasagne.updates.
The following shows 5 code examples of the updates.adagrad method, sorted by popularity by default.
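Before the examples, here is a minimal, self-contained sketch of how adagrad is typically wired into a Theano training function. The network shape, learning rate, and variable names below are illustrative, not taken from any of the examples:

import theano
import theano.tensor as T
import lasagne

# A small illustrative network: 100 input features, 10 output classes.
l_in = lasagne.layers.InputLayer((None, 100))
l_out = lasagne.layers.DenseLayer(l_in, num_units=10,
                                  nonlinearity=lasagne.nonlinearities.softmax)

X = T.matrix('X')
y = T.ivector('y')
prediction = lasagne.layers.get_output(l_out, X)
loss = lasagne.objectives.categorical_crossentropy(prediction, y).mean()

# Signature: adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-06)
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.adagrad(loss, params, learning_rate=0.01)

train_fn = theano.function([X, y], loss, updates=updates)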
Example 1: create_iter_functions
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adagrad [as alias]
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
    batch_index = T.iscalar('batch_index')
    X_batch = X_tensor_type('x')
    y_batch = T.ivector('y')
    batch_slice = slice(batch_index * self.batch_size,
                        (batch_index + 1) * self.batch_size)

    objective = Objective(output_layer, loss_function=categorical_crossentropy)
    loss_train = objective.get_loss(X_batch, target=y_batch)
    loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)

    pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
    proba = output_layer.get_output(X_batch, deterministic=True)
    accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)

    all_params = get_all_params(output_layer)
    updates = adagrad(loss_train, all_params, self.lr, self.epsilon)

    iter_train = theano.function(
        [batch_index], loss_train,
        updates=updates,
        givens={
            X_batch: dataset['X_train'][batch_slice],
            y_batch: dataset['y_train'][batch_slice],
        },
        on_unused_input='ignore',
    )

    iter_valid = None
    if self.use_valid:
        iter_valid = theano.function(
            [batch_index], [loss_eval, accuracy, proba],
            givens={
                X_batch: dataset['X_valid'][batch_slice],
                y_batch: dataset['y_valid'][batch_slice],
            },
        )

    return dict(train=iter_train, valid=iter_valid)
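The returned dictionary maps names to compiled Theano functions that take a batch index. A hypothetical driver loop (num_batches and the reporting are assumptions, not part of the original source) could look like:

iter_funcs = self.create_iter_functions(dataset, output_layer)
for epoch in range(self.max_epochs):
    # average the per-batch training losses for reporting
    losses = [iter_funcs['train'](b) for b in range(num_batches)]
    print('epoch %d: mean train loss %.6f' % (epoch, np.mean(losses)))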
Example 2: create_iter_functions
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adagrad [as alias]
def create_iter_functions(self, dataset, output_layer, X_tensor_type=T.matrix):
    batch_index = T.iscalar('batch_index')
    X_batch = X_tensor_type('x')
    y_batch = T.ivector('y')
    batch_slice = slice(batch_index * self.batch_size,
                        (batch_index + 1) * self.batch_size)

    objective = Objective(output_layer, loss_function=categorical_crossentropy)
    loss_train = objective.get_loss(X_batch, target=y_batch)
    loss_eval = objective.get_loss(X_batch, target=y_batch, deterministic=True)

    pred = T.argmax(output_layer.get_output(X_batch, deterministic=True), axis=1)
    proba = output_layer.get_output(X_batch, deterministic=True)
    accuracy = T.mean(T.eq(pred, y_batch), dtype=theano.config.floatX)

    all_params = get_all_params(output_layer)
    updates = adagrad(loss_train, all_params, self.lr, self.rho)

    iter_train = theano.function(
        [batch_index], loss_train,
        updates=updates,
        givens={
            X_batch: dataset['X_train'][batch_slice],
            y_batch: dataset['y_train'][batch_slice],
        },
        on_unused_input='ignore',
    )

    iter_valid = None
    if self.use_valid:
        iter_valid = theano.function(
            [batch_index], [loss_eval, accuracy, proba],
            givens={
                X_batch: dataset['X_valid'][batch_slice],
                y_batch: dataset['y_valid'][batch_slice],
            },
        )

    return dict(train=iter_train, valid=iter_valid)
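This example is almost identical to Example 1; the only difference is the fourth positional argument to adagrad. Note that the lasagne signature is adagrad(loss_or_grads, params, learning_rate=1.0, epsilon=1e-06), so self.rho here binds to epsilon, the numerical-stability constant, not to a decay rate as in rmsprop or adadelta.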
Example 3: get_nn_model
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adagrad [as alias]
def get_nn_model(shape):
    np.random.seed(9)
    model = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ],
        input_shape=(None, shape[1]),
        hidden1_num_units=16,  # number of units in the first hidden layer
        hidden1_nonlinearity=sigmoid,
        hidden2_num_units=8,  # number of units in the second hidden layer
        hidden2_nonlinearity=sigmoid,
        output_nonlinearity=softmax,
        output_num_units=2,  # number of target classes

        # optimization method:
        update=adagrad,
        update_learning_rate=theano.shared(np.float32(0.1)),

        on_epoch_finished=[],
        use_label_encoder=False,
        batch_iterator_train=BatchIterator(batch_size=500),
        regression=False,  # classification, not regression
        max_epochs=900,  # maximum number of training epochs
        verbose=1,
        eval_size=0.0,
    )
    return model
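Assuming X is a float32 feature matrix and y an int32 label vector (names and preprocessing are assumptions, not shown in the original), the model is then used through the standard nolearn interface:

# Hypothetical usage of the model returned above.
model = get_nn_model(X.shape)
model.fit(X.astype(np.float32), y.astype(np.int32))
proba = model.predict_proba(X_test.astype(np.float32))  # class probabilities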
Example 4: __init__
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adagrad [as alias]
def __init__(self, n_inputs, n_outputs, regression, multiclass=False,
             depth=5, n_estimators=20, n_hidden=128, learning_rate=0.01,
             num_epochs=500, pi_iters=20, sgd_iters=10, batch_size=1000,
             momentum=0.0, dropout=0.0, loss=None, update=adagrad):
    """
    Parameters
    ----------
    n_inputs : number of input features
    n_outputs : number of classes to predict (1 for regression);
        for 2-class classification n_outputs should be 2, not 1
    regression : True for regression, False for classification
    multiclass : not used
    depth : depth of each tree in the ensemble
    n_estimators : number of trees in the ensemble
    n_hidden : number of neurons in the hidden layer
    pi_iters : number of iterations for the iterative algorithm that updates pi
    sgd_iters : number of full iterations of sgd between two consecutive updates of pi
    loss : theano loss function. If None, squared error will be used for regression
        and cross entropy will be used for classification
    update : theano update function
    """
    self._depth = depth
    self._n_estimators = n_estimators
    self._n_hidden = n_hidden
    self._n_outputs = n_outputs
    self._loss = loss
    self._regression = regression
    self._multiclass = multiclass
    self._learning_rate = learning_rate
    self._num_epochs = num_epochs
    self._pi_iters = pi_iters
    self._sgd_iters = sgd_iters
    self._batch_size = batch_size
    self._momentum = momentum
    self._update = update

    self.t_input = T.matrix('input')
    self.t_label = T.matrix('output')

    self._cached_trainable_params = None
    self._cached_params = None

    # each tree contributes 2^depth - 1 decision nodes to the network output
    self._n_net_out = n_estimators * ((1 << depth) - 1)

    self.l_input = InputLayer((None, n_inputs))
    self.l_dense1 = DenseLayer(self.l_input, self._n_hidden, nonlinearity=rectify)
    if dropout != 0:
        self.l_dense1 = DropoutLayer(self.l_dense1, p=dropout)
    if not __DEBUG_NO_FOREST__:
        self.l_dense2 = DenseLayer(self.l_dense1, self._n_net_out, nonlinearity=sigmoid)
        self.l_forest = NeuralForestLayer(self.l_dense2, self._depth,
                                          self._n_estimators, self._n_outputs,
                                          self._pi_iters)
    else:
        self.l_forest = DenseLayer(self.l_dense1, self._n_outputs, nonlinearity=softmax)
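A hypothetical instantiation for a 3-class problem follows. The class name NeuralForest is an assumption (the snippet only shows __init__), as are the feature and class counts:

# Hypothetical: assumes the surrounding class is named NeuralForest.
forest = NeuralForest(n_inputs=20, n_outputs=3, regression=False,
                      depth=4, n_estimators=10, update=adagrad)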
Example 5: get_updates
# Required import: from lasagne import updates [as alias]
# Or: from lasagne.updates import adagrad [as alias]
def get_updates(nnet,
                train_obj,
                trainable_params,
                solver=None):

    implemented_solvers = ("sgd", "momentum", "nesterov", "adagrad",
                           "rmsprop", "adadelta", "adam", "adamax")

    if solver not in implemented_solvers:
        nnet.sgd_solver = "adam"
    else:
        nnet.sgd_solver = solver

    if nnet.sgd_solver == "sgd":
        updates = l_updates.sgd(train_obj,
                                trainable_params,
                                learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "momentum":
        updates = l_updates.momentum(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     momentum=Cfg.momentum)
    elif nnet.sgd_solver == "nesterov":
        updates = l_updates.nesterov_momentum(train_obj,
                                              trainable_params,
                                              learning_rate=Cfg.learning_rate,
                                              momentum=Cfg.momentum)
    elif nnet.sgd_solver == "adagrad":
        updates = l_updates.adagrad(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "rmsprop":
        updates = l_updates.rmsprop(train_obj,
                                    trainable_params,
                                    learning_rate=Cfg.learning_rate,
                                    rho=Cfg.rho)
    elif nnet.sgd_solver == "adadelta":
        updates = l_updates.adadelta(train_obj,
                                     trainable_params,
                                     learning_rate=Cfg.learning_rate,
                                     rho=Cfg.rho)
    elif nnet.sgd_solver == "adam":
        updates = l_updates.adam(train_obj,
                                 trainable_params,
                                 learning_rate=Cfg.learning_rate)
    elif nnet.sgd_solver == "adamax":
        updates = l_updates.adamax(train_obj,
                                   trainable_params,
                                   learning_rate=Cfg.learning_rate)

    return updates
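Note that the function silently falls back to adam when an unknown solver name is passed. A sketch of compiling a training step from the returned updates (nnet.output_layer, X_var, y_var, train_obj, and Cfg are assumed to come from the surrounding project):

# Hypothetical wiring of get_updates into a compiled training step.
trainable_params = lasagne.layers.get_all_params(nnet.output_layer, trainable=True)
updates = get_updates(nnet, train_obj, trainable_params, solver="adagrad")
train_fn = theano.function([X_var, y_var], train_obj, updates=updates)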