本文整理汇总了Python中nolearn.lasagne.NeuralNet.score方法的典型用法代码示例。如果您正苦于以下问题:Python NeuralNet.score方法的具体用法?Python NeuralNet.score怎么用?Python NeuralNet.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nolearn.lasagne.NeuralNet
的用法示例。
在下文中一共展示了NeuralNet.score方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: gridsearch_alpha
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def gridsearch_alpha(self,learning_rate,index,params=None):
hidden_unit = ((index+1)*2)/3
self.l_in = ls.layers.InputLayer(shape=(None,n_input),input_var=None)
self.l_hidden = ls.layers.DenseLayer(self.l_in,num_units=15,nonlinearity=ls.nonlinearities.rectify)
self.network = l_out = ls.layers.DenseLayer(self.l_hidden,num_units=1)
list_results = np.array([learning_rate.shape[0]],dtype=np.float64)
for item in learning_rate:
#Init Neural net
net1 = NeuralNet(
layers=self.network,
# optimization method:
update=nesterov_momentum,
update_learning_rate=item,
update_momentum=0.9,
regression=True, # flag to indicate we're dealing with regression problem
max_epochs=800, # we want to train this many epochs
# verbose=1,
eval_size = 0.4
)
net1.fit(self.X_training,self.y_training)
self.pred = net1.predict(self.n_sample2)
name_file = "Params/saveNeuralNetwork_%s_%s.tdn" %(item,index)
net1.save_params_to(name_file)
score_nn = net1.score(self.n_sample2,self.n_test2)
list_results[item] = score_nn
print "index=%f,item=%f,score=%f"%(index,item,score_nn)
return list_results
示例2: regr
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def regr(X, Y):
    """Fit a single-hidden-layer regression net (tanh hidden, sigmoid output),
    print its training score, and return the fitted model."""
    n_features = X.shape[1]
    input_layer = InputLayer(shape=(None, n_features))
    hidden = DenseLayer(input_layer, num_units=n_features, nonlinearity=tanh)
    output = DenseLayer(hidden, num_units=1, nonlinearity=sigmoid)
    net = NeuralNet(
        output,
        regression=True,
        update_learning_rate=0.01,
        verbose=1,
        max_epochs=700,
    )
    net.fit(X, Y)
    print(net.score(X, Y))
    return net
示例3: regr
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def regr(X, Y):
    """Fit a two-hidden-layer regression net whose layer widths scale with the
    number of target columns, print the training score, and return the net."""
    n_targets = Y.shape[1]
    stack = InputLayer(shape=(None, X.shape[1]))
    stack = DenseLayer(stack, num_units=n_targets + 100, nonlinearity=tanh)
    stack = DenseLayer(stack, num_units=n_targets + 50, nonlinearity=tanh)
    stack = DenseLayer(stack, num_units=n_targets, nonlinearity=None)
    net = NeuralNet(
        stack,
        regression=True,
        update_learning_rate=0.1,
        verbose=1,
    )
    net.fit(X, Y)
    print(net.score(X, Y))
    return net
示例4: test_lasagne_functional_regression
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def test_lasagne_functional_regression(boston):
    """Regression smoke test on the Boston housing data: checks an MAE bound
    and that NeuralNet.score equals sklearn's r2_score on the same split."""
    from nolearn.lasagne import NeuralNet

    X, y = boston
    in_layer = InputLayer(shape=(128, 13))
    hidden = DenseLayer(in_layer, num_units=100)
    out_layer = DenseLayer(hidden, num_units=1, nonlinearity=identity)
    net = NeuralNet(
        layers=out_layer,
        update_learning_rate=0.01,
        update_momentum=0.1,
        regression=True,
        max_epochs=50,
    )
    net.fit(X[:300], y[:300])
    preds = net.predict(X[300:])
    assert mean_absolute_error(preds, y[300:]) < 3.0
    assert net.score(X[300:], y[300:]) == r2_score(preds, y[300:])
示例5: classify
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def classify(X, y, X_test, y_test):
    """Train a two-hidden-layer softmax classifier with dropout, print the
    training score and classification report, and save/show the confusion
    matrix for the held-out test set."""
    architecture = [
        ('input', InputLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('output', DenseLayer),
    ]
    model = NeuralNet(
        layers=architecture,
        input_shape=(None, X.shape[1]),
        dense0_num_units=300,
        dropout0_p=0.075,
        dense1_num_units=750,
        dropout1_p=0.1,
        output_num_units=3,
        output_nonlinearity=softmax,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.99,
        eval_size=0.2,
        verbose=1,
        max_epochs=15,
    )
    model.fit(X, y)
    print(model.score(X, y))
    predictions = model.predict(X_test)
    print(classification_report(y_test, predictions))
    conf = confusion_matrix(y_test, predictions)
    plt.matshow(conf)
    plt.title('Confusion matrix')
    plt.colorbar()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig('confmatrix.png')
    plt.show()
    print(conf)
示例6: train_nolearn_model
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def train_nolearn_model(X, y):
    '''
    NeuralNet with nolearn: impute/normalize features, train a one-hidden-layer
    classifier, and return (train_score, test_score) from NeuralNet.score.
    '''
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=5)
    # Preprocessing helpers defined elsewhere in the original file.
    X_train, X_test = impute_nan(X_train, X_test)
    X_train, X_test = normalize_features(X_train, X_test)
    lays = [('input', layers.InputLayer),
            ('hidden', layers.DenseLayer),
            ('output', layers.DenseLayer),
            ]
    net = NeuralNet(
        layers=lays,
        input_shape=(None, 23),  # assumes 23 input features — TODO confirm
        hidden_num_units=10,
        objective_loss_function=lasagne.objectives.categorical_crossentropy,
        output_nonlinearity=lasagne.nonlinearities.sigmoid,
        output_num_units=10,
        update=nesterov_momentum,
        update_learning_rate=0.001,
        update_momentum=0.9,
        max_epochs=10,
        verbose=1,
    )
    # BUG FIX: the original left fit() commented out (scoring an unfitted net
    # raises) and called net.predict(X_test, y_test) — predict takes only X;
    # score was clearly intended for the test split.
    net.fit(X_train, y_train)
    test_score = net.score(X_test, y_test)
    train_score = net.score(X_train, y_train)
    return train_score, test_score
示例7: NeuralNet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
# Flat training script: build a 10 -> 15 -> 1 MLP regressor, fit it on
# (X_training, y_training) defined earlier in the original file, save the
# learned parameters, and report score/predictions on a held-out sample.
print "X_training shape must match y_training shape"
print "Generate X_test and y_test"
n_input = 11
print "X_test..."
print "Multi Layer Perceptron..."
#Build layer for MLP
# NOTE(review): the input layer is declared with 10 features although
# n_input is set to 11 just above — confirm the actual feature count.
l_in = ls.layers.InputLayer(shape=(None,10),input_var=None)
l_hidden = ls.layers.DenseLayer(l_in,num_units=15,nonlinearity=ls.nonlinearities.sigmoid)
network = l_out = ls.layers.DenseLayer(l_hidden,num_units=1)
print "Neural network initialize"
#Init Neural net
net1 = NeuralNet(
    layers=network,
    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.001,
    update_momentum=0.9,
    regression=True, # flag to indicate we're dealing with regression problem
    max_epochs=400, # we want to train this many epochs
    verbose=1,
    )
#
print "Training time!!!!!....."
# X_training / y_training / n_sample2 / n_test2 come from earlier in the
# original script (not visible in this chunk).
net1.fit(X_training,y_training)
net1.save_params_to("saveNeuralNetwork.tdn")
print "Score rate = "
print net1.score(n_sample2,n_test2)
print net1.predict(n_sample2)[0:2]
示例8: open
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
# Notebook fragment: fit the previously configured net0 on scaled features,
# pickle the trained model, and report accuracy and log-loss on the training
# data.  net0, features_scaled, labels, scaler, get_features and test are all
# defined earlier in the original notebook.
print 'started', datetime.now()
res = net0.fit(features_scaled, labels.astype('int32'))
print 'finished', datetime.now()
print res
# In[2]:
import pickle
pickle.dump(net0, open('lasagne_model.pkl', 'wb'))
# In[106]:
# Mean accuracy on the data the net was trained on (optimistic estimate).
print 'score', net0.score(features_scaled, labels)
# In[111]:
print 'logloss', log_loss(labels, net0.predict_proba(features_scaled))
# In[16]:
test_features, _ = get_features(test)
# NOTE(review): re-fitting the scaler on the test set leaks test statistics
# into preprocessing; this should probably be transform only — confirm intent.
scaler.fit(test_features)
test_features = scaler.transform(test_features)
示例9: dict
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
# Tail of a NeuralNet(...) construction whose opening lies above this chunk:
# final feature-pool / dropout / 10-way softmax layers plus the training
# hyper-parameters.
(FeaturePoolLayer, dict(name='l8p', pool_size=2)),
(DropoutLayer, dict(name='l8drop', p=0.5)),
(DenseLayer, dict(name='out', num_units=10, nonlinearity=nonlinearities.softmax)),
],
regression=False,  # classification with categorical cross-entropy
objective_loss_function=objectives.categorical_crossentropy,
update=updates.adam,
update_learning_rate=1e-3,
# Custom batch iterators defined elsewhere in the original file.
batch_iterator_train=train_iterator,
batch_iterator_test=test_iterator,
# Callbacks run after every epoch (checkpointing / plotting).
on_epoch_finished=[
    save_training_history,
    plot_training_history,
],
verbose=10,
max_epochs=20
)
if __name__ == '__main__':
    X_train, X_test, y_train, y_test = load_data(test_size=0.25, random_state=42)
    print "Training Network"
    net.fit(X_train, y_train)
    # Accuracy on the held-out 25% split.
    score = net.score(X_test, y_test)
    print 'Final score %.4f' % score
示例10: build_nn
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def build_nn(df=None, class_column_name=None):
    """
    Construct a classification neural network model from input dataframe

    Parameters:
        df : input dataframe
        class_column_name : identity of the column in df with class data

    Raises:
        ValueError / TypeError on missing or mis-typed arguments.

    Side effects:
        Pickles the training data, test frame, grid search and fitted net
        to PICKLE.
    """
    # Type check inputs for sanity
    if df is None:
        raise ValueError('df is None')
    if not isinstance(df, pd.DataFrame):
        raise TypeError('df is not a dataframe')
    if class_column_name is None:
        raise ValueError('class_column_name is None')
    if not isinstance(class_column_name, basestring):
        raise TypeError('class_column_name is not a string')
    if class_column_name not in df.columns:
        raise ValueError('class_column_name (%s) is not a valid column name'
                         % class_column_name)
    # Shuffle rows before splitting.
    df = df.sample(frac=1).reset_index(drop=True)
    # BUG FIX: TEST_SIZE must be passed by keyword — as a second positional
    # argument sklearn's train_test_split treats it as another array.
    # (Assumes sklearn's train_test_split — confirm it is not a local helper.)
    df_train, df_test = train_test_split(df, test_size=TEST_SIZE)
    # BUG FIX: the original sliced with a float bound and 2-D indexing
    # (df_train[:(0.75 * n), :]) which a DataFrame rejects; use an integer
    # split point with .iloc for a 75/25 train/validation split.
    split_at = int(0.75 * len(df_train.index))
    df_train, df_val = df_train.iloc[:split_at], df_train.iloc[split_at:]
    x_train, x_val, x_test = df_train, df_val, df_test
    # Remove the classification column from the feature matrices.
    # BUG FIX: drop(..., inplace=True) returns None, so chaining .values
    # raised AttributeError; drop without inplace and take .values.
    x_train = x_train.drop(class_column_name, axis=1).values
    x_val = x_val.drop(class_column_name, axis=1).values
    x_test = x_test.drop(class_column_name, axis=1).values
    y_train = df_train[class_column_name].values.astype(np.int32)
    y_val = df_val[class_column_name].values.astype(np.int32)
    y_test = df_test[class_column_name].values.astype(np.int32)
    # Create classification model: two softmax hidden layers of NODES units
    # and a softmax output sized to the number of observed classes.
    net = NeuralNet(layers=[('input', InputLayer),
                            ('hidden0', DenseLayer),
                            ('hidden1', DenseLayer),
                            ('output', DenseLayer)],
                    input_shape=(None, x_train.shape[1]),
                    hidden0_num_units=NODES,
                    hidden0_nonlinearity=nonlinearities.softmax,
                    hidden1_num_units=NODES,
                    hidden1_nonlinearity=nonlinearities.softmax,
                    output_num_units=len(np.unique(y_train)),
                    output_nonlinearity=nonlinearities.softmax,
                    update_learning_rate=0.1,
                    verbose=1,
                    max_epochs=100)
    # Hyper-parameter grid for model selection.
    param_grid = {'hidden0_num_units': [4, 17, 25],
                  'hidden0_nonlinearity':
                      [nonlinearities.sigmoid, nonlinearities.softmax],
                  'hidden1_num_units': [4, 17, 25],
                  'hidden1_nonlinearity':
                      [nonlinearities.sigmoid, nonlinearities.softmax],
                  'update_learning_rate': [0.01, 0.1, 0.5]}
    grid_search = GridSearchCV(net, param_grid, verbose=0)
    grid_search.fit(x_train, y_train)
    net.fit(x_train, y_train)
    print(net.score(x_train, y_train))
    # Persist everything needed to reproduce/evaluate the model.
    with open(PICKLE, 'wb') as file:
        pickle.dump(x_train, file, pickle.HIGHEST_PROTOCOL)
        pickle.dump(y_train, file, pickle.HIGHEST_PROTOCOL)
        pickle.dump(df_test, file, pickle.HIGHEST_PROTOCOL)
        pickle.dump(grid_search, file, pickle.HIGHEST_PROTOCOL)
        pickle.dump(net, file, pickle.HIGHEST_PROTOCOL)
示例11: build_net
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
def build_net(train, test, y_scaler):
    """Build and train a 4-hidden-layer regression MLP, then print sample
    predictions and scores on the held-out test pairs.

    Parameters:
        train : (xs_train, ys_train) tuple of features and targets
        test : (xs_test, ys_test) tuple used for evaluation
        y_scaler : fitted target scaler; used both to build the loss (via
            get_loss_function) and to invert predictions for display
    """
    xs_test, ys_test = test
    xs_train, ys_train = train
    num_features = xs_train.shape[1]
    #assert(num_features == len(feature_extractions().keys()))
    # Custom loss parameterized by the target scaler (defined elsewhere).
    loss_function = get_loss_function(y_scaler)
    input_var = theano.tensor.dmatrix('inputs')
    target_var = theano.tensor.dvector('targets')
    # l_in = las.layers.InputLayer((len(xs_test), len(xs_test[0])), input_var=input_var)
    # l_recur_a = las.layers.RecurrentLayer(l_in, num_units= 50)
    # l_hidden = las.layers.DenseLayer(l_recur_a, num_units = 4,nonlinearity = las.nonlinearities.softmax, W=las.init.Normal(0.1))
    # l_recur_b = las.layers.RecurrentLayer(l_hidden, num_units = 4) #Try doing custom
    # -----pure classes below
    c_l_in = las.layers.InputLayer
    c_l_recur_a = las.layers.RecurrentLayer
    c_l_hidden = las.layers.DenseLayer
    c_l_recur_b = las.layers.RecurrentLayer #Try doing custom
    c_expression_layer = las.layers.special.ExpressionLayer
    c_output = las.layers.DenseLayer
    #layers = [('input', c_l_in), ('a', c_l_recur_a), ('h', c_l_hidden), ('b', c_l_recur_b),('output', c_output)]
    # Four stacked dense layers of shrinking width (400 -> 50 -> 20 -> 1).
    layers = [('input', c_l_in), ('h', c_l_hidden), ('h2', c_l_hidden),('h3', c_l_hidden),('h4', c_l_hidden), ('output', c_output)]
    print "\nBuilding..."
    #o = binary_hinge_loss
    net0 = NeuralNet(layers=layers,
                     regression=True,
                     # Targets are float64 column vectors (broadcastable 2nd axis).
                     y_tensor_type=theano.tensor.type.TensorType('float64', (False, True)) ,
                     input_shape=(None, num_features),
                     # input_input_var = input_var,
                     # a_num_units = 50,
                     h_num_units=400,
                     #h_nonlinearity = las.nonlinearities.softmax,
                     h2_num_units=50,
                     h3_num_units=20,
                     h4_num_units=1,
                     # h2_nonlinearity = las.nonlinearities.softmax,
                     # b_num_units = 4,
                     #e_function=expression_layer_fn,
                     output_num_units=1,
                     # output_nonlinearity=softmax,
                     objective_loss_function=loss_function,
                     update=nesterov_momentum,
                     update_learning_rate=0.001,
                     update_momentum=0.3,
                     # Hold out 10% of the training data for validation.
                     train_split=nolearn.lasagne.TrainSplit(eval_size=0.1),
                     verbose=1,
                     max_epochs=1000)
    print "Begin training"
    net0.fit(xs_train, ys_train)
    # Show the first two test targets in scaled and original units.
    print "y: %f" % (ys_test[0])
    print "transformed y: %f" %(y_scaler.inverse_transform([ys_test[0]])[0])
    print "\n"
    print "y: %f" % (ys_test[1])
    print "transformed y: %f" % (y_scaler.inverse_transform([ys_test[1]])[0])
    print "\n predictions: :"
    print "y: {}".format((net0.predict([xs_test[0], xs_test[1]])))
    print y_scaler.inverse_transform(net0.predict([xs_test[0], xs_test[1]]))
    # predicts = net0.predict([[30.0,-1.5,4.5,3087],[1.0,1.0,1.0,1.0],[5.0,0.1,5.0,1000]])
    # print "\nPrediction: %f - 93864 == %f \n %f - 3 == %f \n %f - 1000000 == %f" % (predicts[0], (predicts[0]-93864)*1.0/93864, predicts[1], (predicts[1] - 3)*1.0/3, predicts[2], (predicts[2]-1000000)/1000000)
    # print "\n\nTransformed:"
    # predicts = map(lambda x: y_scaler.inverse_transform([x]), predicts)
    # predicts = [y[0] for y in predicts]
    # print "prediction: %f - 93864 == %f \n %f - 3 == %f \n %f - 1000000 == %f" % (predicts[0], (predicts[0]-93864)*1.0/93864, predicts[1], (predicts[1] - 3)*1.0/3, predicts[2], (predicts[2]-1000000)/1000000)
    print "\n Scores:"
    print "test score: %f" % (net0.score(xs_test,ys_test))
    print net0.score(xs_train,ys_train)
    # NOTE(review): the two "random score" lines pair features and targets
    # from DIFFERENT splits — presumably a deliberate shuffled baseline;
    # confirm this is intentional.
    print "random score: %f" % (net0.score(xs_test,ys_train[0:len(xs_test)]))
    print "random score: %f" % (net0.score(xs_train[0:len(ys_test)],ys_test))
    print net0.layers
示例12: NeuralNet
# 需要导入模块: from nolearn.lasagne import NeuralNet [as 别名]
# 或者: from nolearn.lasagne.NeuralNet import score [as 别名]
# Notebook fragment (model 0): a NeuralNet over the layer list `layer0`
# defined earlier in the original notebook, trained with the adam optimizer.
NN0 = NeuralNet(layers = layer0,
                max_epochs = 10,
                # optimization method:
                update=adam,
                update_learning_rate=0.0002
                )
# In[159]:
NN0.fit(x_train, y_train)
# In[160]:
# Mean accuracy on the validation split.
NN0.score(x_vali, y_vali)
# the accuracy is only 0.95, lower than the score of random forest of 0.966
# # model 1: one input layer, two hidden layers, and one output layer
# In[172]:
# Model 1 layer spec: 28x28 single-channel input, two 1000-unit dense layers
# with one dropout layer between them, and a 10-way softmax output.
layer1=[(layers.InputLayer, {'shape': (None, 1, 28, 28)}),
        (layers.DenseLayer, {'num_units':1000}),
        (layers.DropoutLayer, {}),
        (layers.DenseLayer, {'num_units':1000}),
        (layers.DenseLayer, {'num_units':10, 'nonlinearity': softmax})]
# In[173]: