本文整理汇总了Python中lasagne.regularization.l2方法的典型用法代码示例。如果您正苦于以下问题:Python regularization.l2方法的具体用法?Python regularization.l2怎么用?Python regularization.l2使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lasagne.regularization
的用法示例。
在下文中一共展示了regularization.l2方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the network.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, ago_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例2: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay, learning_rate):
    """Compile Theano training/validation functions with a caller-chosen rate.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, smth_act_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.
    learning_rate : float
        Learning rate shared by both Nesterov-momentum update rules.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=learning_rate, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=learning_rate, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例3: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    NOTE(review): the loss is built from the *deterministic* output, so any
    stochastic layers (e.g. dropout) are disabled during training — presumably
    intentional here; confirm against the network definition.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, smth_act_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例4: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    NOTE(review): the loss uses the *deterministic* network output, so any
    stochastic layers are inactive during training — verify this is intended.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, ago_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例5: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    This variant exposes only the encode and hidden activations (no
    smoothed-activation layer in the tuple).

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例6: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    NOTE(review): the loss uses the *deterministic* network output, so any
    stochastic layers are inactive during training — verify this is intended.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, he_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例7: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, he_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例8: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    This variant exposes only the encode and hidden activations and trains on
    the stochastic (non-deterministic) forward pass.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, network)`` Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例9: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions (no augmentation input).

    NOTE(review): the loss uses the *deterministic* network output, so any
    stochastic layers are inactive during training — verify this is intended.

    Parameters
    ----------
    network_layers : tuple
        ``(encode_layer, hidden_layer, smth_act_layer, network)`` Lasagne layers.
    input_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.00001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Intermediate activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    fn_inputs = [input_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例10: make_training_functions
# 需要导入模块: from lasagne import regularization [as 别名]
# 或者: from lasagne.regularization import l2 [as 别名]
def make_training_functions(network, encode_layer, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano training/validation functions for the network.

    NOTE(review): the loss uses the *deterministic* network output, so any
    stochastic layers are inactive during training — verify this is intended.

    Parameters
    ----------
    network : lasagne.layers.Layer
        Output layer of the network to train.
    encode_layer : lasagne.layers.Layer
        Layer whose activations are returned by the validation function.
    input_var, aug_var, target_var : Theano symbolic variables
        Inputs fed to every compiled function.
    stack_params : list
        Parameter subset updated by the stack-only training function.
    weight_decay : float
        Coefficient of the L2 penalty added to the squared-error loss.

    Returns
    -------
    tuple
        ``(val_fn, train_fn, stack_train_fn)`` compiled Theano functions.
    """
    output = lasagne.layers.get_output(network, deterministic=True)

    # Mean squared error plus L2 decay over all regularizable parameters.
    l2_penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(output, target_var).mean() \
        + weight_decay * l2_penalty

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=0.0001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.0001, momentum=0.95)

    # Encoder activations exposed by the validation function.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, output])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn