當前位置: 首頁>>代碼示例>>Python>>正文


Python regularization.l2方法代碼示例

本文整理匯總了Python中lasagne.regularization.l2方法的典型用法代碼示例。如果您正苦於以下問題:Python regularization.l2方法的具體用法?Python regularization.l2怎麽用?Python regularization.l2使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在lasagne.regularization的用法示例。


在下文中一共展示了regularization.l2方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, ago_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
        Evaluator (loss + intermediate activations + output), full trainer,
        and stack-parameters-only trainer.
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)
    # MSE plus L2 weight decay over every regularizable parameter.
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    # Stack-wise updates touch only stack_params, at a smaller learning rate.
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:24,代碼來源:conv_sup_regression_hseg_4ch_ago.py

示例2: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay, learning_rate):
    """Compile Theano training/evaluation functions with a caller-chosen rate.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, smth_act_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.
    learning_rate : float
        Learning rate shared by both Nesterov-momentum update rules.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)
    # MSE plus L2 weight decay over every regularizable parameter.
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=learning_rate, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:24,代碼來源:conv_sup_regression_hseg_4ch_modelsel.py

示例3: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, smth_act_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): the training loss uses the deterministic output; if the
    # network contains dropout/noise layers the gradient ignores them — confirm
    # this is intended (sibling variants use a deterministic=False pass).
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    # Stack-wise updates touch only stack_params, at a smaller learning rate.
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:23,代碼來源:conv_sup_regression_4ch.py

示例4: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, ago_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, ago_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): training loss is built on the deterministic output; any
    # dropout/noise layers would be bypassed during training — confirm intended.
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(ago_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:23,代碼來源:conv_sup_regression_4ch_ago.py

示例5: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for the baseline regression network.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
        Note: val_fn returns [loss, encode, hidden, output] (no smooth
        activation layer in this variant).
    """
    encode_layer, hidden_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): training loss uses the deterministic output — any
    # dropout/noise layers would be inactive during training; confirm intended.
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:22,代碼來源:conv_sup_regression_baseline.py

示例6: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, he_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): training loss is built on the deterministic output; any
    # dropout/noise layers would be bypassed during training — confirm intended.
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:23,代碼來源:conv_sup_regression_4ch_he.py

示例7: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, he_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, he_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)
    # MSE plus L2 weight decay over every regularizable parameter.
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    # Stack-wise updates touch only stack_params, at a smaller learning rate.
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(he_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:24,代碼來源:conv_sup_regression_hseg_4ch_he.py

示例8: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, network) Lasagne layers.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
        Note: val_fn returns [loss, encode, hidden, output].
    """
    encode_layer, hidden_layer, network = network_layers

    # Deterministic pass for evaluation; stochastic pass for the training loss.
    output = lasagne.layers.get_output(network, deterministic=True)
    pred_train = lasagne.layers.get_output(network, deterministic=False)
    # MSE plus L2 weight decay over every regularizable parameter.
    loss = lasagne.objectives.squared_error(pred_train, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00002, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, hidden, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:23,代碼來源:conv_sup_regression_hseg_4ch_leaky.py

示例9: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network_layers, input_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for the synthetic-data regression network.

    Parameters
    ----------
    network_layers : tuple
        (encode_layer, hidden_layer, smth_act_layer, network) Lasagne layers.
    input_var, target_var : Theano symbolic variables
        Network input and regression target (no augmentation input in this
        variant).
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
    """
    encode_layer, hidden_layer, smth_act_layer, network = network_layers

    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): training loss is built on the deterministic output; any
    # dropout/noise layers would be bypassed during training — confirm intended.
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    # Both update rules use the same learning rate in this variant.
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.00001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.00001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)
    hidden = lasagne.layers.get_output(hidden_layer, deterministic=True)
    smth_act = lasagne.layers.get_output(smth_act_layer, deterministic=True)

    val_fn = theano.function([input_var, target_var], [loss, encode, hidden, smth_act, output])
    train_fn = theano.function([input_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:23,代碼來源:conv_sup_large_regression_syn.py

示例10: make_training_functions

# 需要導入模塊: from lasagne import regularization [as 別名]
# 或者: from lasagne.regularization import l2 [as 別名]
def make_training_functions(network, encode_layer, input_var, aug_var, target_var, stack_params, weight_decay):
    """Compile Theano functions for training and evaluating the regression net.

    Parameters
    ----------
    network : lasagne layer
        Output layer of the network.
    encode_layer : lasagne layer
        Layer whose activations are returned by val_fn as the encoding.
    input_var, aug_var, target_var : Theano symbolic variables
        Network input, augmentation input, and regression target.
    stack_params : list
        Parameter subset updated by the stack-wise trainer.
    weight_decay : float
        Coefficient of the L2 penalty over all regularizable parameters.

    Returns
    -------
    (val_fn, train_fn, stack_train_fn) : tuple of theano.function
        Note: val_fn returns [loss, encode, output].
    """
    output = lasagne.layers.get_output(network, deterministic=True)
    # NOTE(review): training loss is built on the deterministic output; any
    # dropout/noise layers would be bypassed during training — confirm intended.
    loss = lasagne.objectives.squared_error(output, target_var).mean() + \
           weight_decay * regularization.regularize_network_params(
               layer=network, penalty=regularization.l2, tags={'regularizable': True})

    params = layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.0001, momentum=0.95)
    stack_updates = lasagne.updates.nesterov_momentum(loss, stack_params, learning_rate=0.0001, momentum=0.95)

    encode = lasagne.layers.get_output(encode_layer, deterministic=True)

    val_fn = theano.function([input_var, aug_var, target_var], [loss, encode, output])
    train_fn = theano.function([input_var, aug_var, target_var], loss, updates=updates)
    stack_train_fn = theano.function([input_var, aug_var, target_var], loss, updates=stack_updates)

    return val_fn, train_fn, stack_train_fn
開發者ID:SBU-BMI,項目名稱:u24_lymphocyte,代碼行數:19,代碼來源:conv_sup_regression.py


注:本文中的lasagne.regularization.l2方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。