本文整理匯總了Python中lasagne.regularization.regularize_network_params方法的典型用法代碼示例。如果您正苦於以下問題:Python regularization.regularize_network_params方法的具體用法?Python regularization.regularize_network_params怎麽用?Python regularization.regularize_network_params使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊lasagne.regularization
的用法示例。
在下文中一共展示了regularization.regularize_network_params方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano validation/training functions for the stacked network.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, ago_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables fed to the net.
        stack_params: parameter subset updated by the stack-only trainer.
        weight_decay: coefficient for the L2 penalty over regularizable params.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass feeds the loss.
    det_output = lasagne.layers.get_output(network, deterministic=True)
    stoch_output = lasagne.layers.get_output(network, deterministic=False)

    reg_term = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = lasagne.objectives.squared_error(stoch_output, target_var).mean() \
        + weight_decay * reg_term

    trainable = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, trainable, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    # Deterministic intermediate activations exposed for inspection.
    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(inputs, [loss, encode, hidden, smth_act, det_output])
    train_fn = theano.function(inputs, loss, updates=updates)
    stack_train_fn = theano.function(inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例2: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay, learning_rate):
    """Compile Theano functions with a caller-supplied learning rate.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, smth_act_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.
        learning_rate: Nesterov-momentum learning rate for both update rules.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    # Evaluation uses the deterministic path; the loss uses the stochastic one.
    eval_out = lasagne.layers.get_output(network, deterministic=True)
    train_out = lasagne.layers.get_output(network, deterministic=False)

    penalty = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(train_out, target_var).mean()
            + weight_decay * penalty)

    all_params = layers.get_all_params(network, trainable=True)
    # Both update rules share the same caller-provided learning rate.
    updates = lasagne.updates.nesterov_momentum(
        loss, all_params, learning_rate=learning_rate, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=learning_rate, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, hidden, smth_act, eval_out])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例3: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions; the loss is taken on the deterministic output.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, smth_act_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    # NOTE: the same deterministic output drives both the loss and validation.
    net_out = lasagne.layers.get_output(network, deterministic=True)
    l2_cost = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(net_out, target_var).mean()
            + weight_decay * l2_cost)

    full_params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, full_params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    fn_in = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_in, [loss, encode, hidden, smth_act, net_out])
    train_fn = theano.function(fn_in, loss, updates=updates)
    stack_train_fn = theano.function(fn_in, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例4: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions; loss computed on the deterministic output.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, ago_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    prediction = lasagne.layers.get_output(network, deterministic=True)
    weight_cost = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(prediction, target_var).mean()
            + weight_decay * weight_cost)

    trainable_params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, trainable_params, learning_rate=0.00002, momentum=0.95)
    # Stack-only trainer uses a smaller learning rate on the given subset.
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    symbolic_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(symbolic_inputs, [loss, encode, hidden, smth_act, prediction])
    train_fn = theano.function(symbolic_inputs, loss, updates=updates)
    stack_train_fn = theano.function(symbolic_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例5: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for a network exposing encode/hidden layers only.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, network = network_layers

    net_output = lasagne.layers.get_output(network, deterministic=True)
    l2_term = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(net_output, target_var).mean()
            + weight_decay * l2_term)

    trainable = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, trainable, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    given = [input_var, aug_var, target_var]
    val_fn = theano.function(given, [loss, encode, hidden, net_output])
    train_fn = theano.function(given, loss, updates=updates)
    stack_train_fn = theano.function(given, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例6: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions; loss on deterministic output, exposes he_layer.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, he_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    y_hat = lasagne.layers.get_output(network, deterministic=True)
    reg_cost = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(y_hat, target_var).mean()
            + weight_decay * reg_cost)

    all_trainable = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, all_trainable, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    arg_list = [input_var, aug_var, target_var]
    val_fn = theano.function(arg_list, [loss, encode, hidden, smth_act, y_hat])
    train_fn = theano.function(arg_list, loss, updates=updates)
    stack_train_fn = theano.function(arg_list, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例7: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions; stochastic loss, deterministic validation output.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, he_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    # Validation uses the deterministic output; training loss the stochastic one.
    eval_output = lasagne.layers.get_output(network, deterministic=True)
    train_output = lasagne.layers.get_output(network, deterministic=False)

    decay_term = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(train_output, target_var).mean()
            + weight_decay * decay_term)

    net_params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, net_params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    sym_in = [input_var, aug_var, target_var]
    val_fn = theano.function(sym_in, [loss, encode, hidden, smth_act, eval_output])
    train_fn = theano.function(sym_in, loss, updates=updates)
    stack_train_fn = theano.function(sym_in, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例8: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for an encode/hidden network; stochastic loss.

    Args:
        network_layers: tuple (encode_layer, hidden_layer, network).
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, network = network_layers

    # Separate deterministic (eval) and stochastic (training-loss) passes.
    out_det = lasagne.layers.get_output(network, deterministic=True)
    out_stoch = lasagne.layers.get_output(network, deterministic=False)

    l2_pen = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(out_stoch, target_var).mean()
            + weight_decay * l2_pen)

    param_list = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        loss, param_list, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    fn_args = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_args, [loss, encode, hidden, out_det])
    train_fn = theano.function(fn_args, loss, updates=updates)
    stack_train_fn = theano.function(fn_args, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例9: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    """Compile Theano functions (no augmentation input in this variant).

    Args:
        network_layers: tuple (encode_layer, hidden_layer, smth_act_layer, network).
        input_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    prediction = lasagne.layers.get_output(network, deterministic=True)
    reg_loss = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(prediction, target_var).mean()
            + weight_decay * reg_loss)

    trainable = layers.get_all_params(network, trainable=True)
    # Both trainers share the same learning rate in this variant.
    updates = lasagne.updates.nesterov_momentum(
        loss, trainable, learning_rate=0.00001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    ins = [input_var, target_var]
    val_fn = theano.function(ins, [loss, encode, hidden, smth_act, prediction])
    train_fn = theano.function(ins, loss, updates=updates)
    stack_train_fn = theano.function(ins, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn
示例10: make_training_functions
# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import regularize_network_params [as 別名]
def make_training_functions(network, encode_layer, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions; network and encode layer passed separately.

    Args:
        network: output lasagne layer of the model.
        encode_layer: layer whose deterministic activation is returned by val_fn.
        input_var, aug_var, target_var: Theano symbolic variables.
        stack_params: parameter subset for the stack-only trainer.
        weight_decay: L2 penalty coefficient on regularizable parameters.

    Returns:
        (val_fn, train_fn, stack_train_fn) compiled Theano functions.
    """
    net_out = lasagne.layers.get_output(network, deterministic=True)
    penalty_term = regularization.regularize_network_params(
        layer=network, penalty=regularization.l2, tags={'regularizable': True})
    loss = (lasagne.objectives.squared_error(net_out, target_var).mean()
            + weight_decay * penalty_term)

    trainable = layers.get_all_params(network, trainable=True)
    # Both trainers use the same 1e-4 learning rate in this variant.
    updates = lasagne.updates.nesterov_momentum(
        loss, trainable, learning_rate=0.0001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(
        loss, stack_params, learning_rate=0.0001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)

    fn_inputs = [input_var, aug_var, target_var]
    val_fn = theano.function(fn_inputs, [loss, encode, net_out])
    train_fn = theano.function(fn_inputs, loss, updates=updates)
    stack_train_fn = theano.function(fn_inputs, loss, updates=stack_updates)
    return val_fn, train_fn, stack_train_fn