This article collects typical usage examples of the Python method sklearn.neural_network.MLPRegressor.score. If you are wondering what exactly MLPRegressor.score does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also read further about the containing class, sklearn.neural_network.MLPRegressor.
The following presents 9 code examples of MLPRegressor.score, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
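Before the examples, a quick orientation: for any scikit-learn regressor, score(X, y) returns the coefficient of determination R^2 of the predictions, so 1.0 means a perfect fit. A minimal, self-contained sketch (the synthetic dataset and hyperparameters here are illustrative, not taken from any example below):

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

# Synthetic regression data (illustrative only)
X, y = make_regression(n_samples=500, n_features=10, noise=5.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

mlp = MLPRegressor(hidden_layer_sizes=(50,), max_iter=2000, random_state=0)
mlp.fit(X_train, y_train)

# score() returns the R^2 coefficient of determination
print("train R^2:", mlp.score(X_train, y_train))
print("test  R^2:", mlp.score(X_test, y_test))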
Example 1: test_lbfgs_regression
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    # Xboston, yboston, ACTIVATION_TYPES and assert_greater come from the
    # surrounding scikit-learn test module this excerpt was taken from.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        if activation == 'identity':
            assert_greater(mlp.score(X, y), 0.84)
        else:
            # Non-linear models perform much better than a linear bottleneck:
            assert_greater(mlp.score(X, y), 0.95)
Example 2: test_multioutput_regression
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
def test_multioutput_regression():
    # Test that multi-output regression works as expected.
    X, y = make_regression(n_samples=200, n_targets=5)
    mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.9)
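In the multi-output case, score averages R^2 uniformly across the targets; in recent scikit-learn versions it is equivalent to calling r2_score directly. A quick sketch of checking that equivalence (illustrative data):

from sklearn.datasets import make_regression
from sklearn.metrics import r2_score
from sklearn.neural_network import MLPRegressor

X, y = make_regression(n_samples=200, n_targets=5, random_state=0)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=(50,), max_iter=200,
                   random_state=1).fit(X, y)

# score() should match r2_score with multioutput='uniform_average'
# (the behaviour of RegressorMixin.score in recent scikit-learn versions)
print(mlp.score(X, y))
print(r2_score(y, mlp.predict(X), multioutput='uniform_average'))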
Example 3: test_lbfgs_regression
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
    # This variant comes from a pre-0.18 development version of scikit-learn,
    # where the solver was selected with `algorithm='l-bfgs'`; since the 0.18
    # release the equivalent parameter is solver='lbfgs'.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(algorithm='l-bfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        assert_greater(mlp.score(X, y), 0.95)
Example 4: test_partial_fit_regression
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
def test_partial_fit_regression():
    # Test partial_fit on regression.
    # `partial_fit` should yield the same results as `fit` for regression.
    X = Xboston
    y = yboston
    for momentum in [0, .9]:
        mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                           random_state=1, learning_rate_init=0.01,
                           batch_size=X.shape[0], momentum=momentum)
        with warnings.catch_warnings(record=True):
            # catch convergence warning
            mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPRegressor(solver='sgd', activation='relu',
                           learning_rate_init=0.01, random_state=1,
                           batch_size=X.shape[0], momentum=momentum)
        for i in range(100):
            mlp.partial_fit(X, y)
        pred2 = mlp.predict(X)
        assert_almost_equal(pred1, pred2, decimal=2)
        score = mlp.score(X, y)
        assert_greater(score, 0.75)
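Because batch_size is set to the full dataset, each partial_fit call in this test amounts to one full-batch epoch, which is why 100 calls mirror fit with max_iter=100. A stand-alone sketch of the same incremental pattern (synthetic data; hyperparameters are illustrative):

import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.RandomState(0)
X = rng.randn(200, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(200)

mlp = MLPRegressor(solver='sgd', learning_rate_init=0.01,
                   batch_size=X.shape[0], random_state=0)
for epoch in range(100):
    mlp.partial_fit(X, y)  # one full-batch gradient step per call
print("R^2 after 100 incremental epochs:", mlp.score(X, y))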
Example 5: MLPRegressor
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
# Example of a regressor using the scikit-learn library,
# applied to the XOR gate.
from sklearn.neural_network import MLPRegressor

X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]  # the four inputs 00 01 10 11
y = [0, 1, 1, 0]  # XOR output for each input
# Check http://scikit-learn.org/dev/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
# for more details.
# (solver='sgd' replaces the pre-0.18 spelling algorithm='sgd')
reg = MLPRegressor(hidden_layer_sizes=(5,), activation='tanh', solver='sgd',
                   alpha=0.001, learning_rate='constant', max_iter=10000,
                   random_state=None, verbose=False, warm_start=False,
                   momentum=0.8, tol=10e-8, shuffle=False)
reg.fit(X, y)
outp = reg.predict([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
print('Results:')
print('0 0 0:', outp[0])
print('0 1 1:', outp[1])
print('1 0 1:', outp[2])
print('1 1 0:', outp[3])
print('Score:', reg.score(X, y))
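Since the regressor produces continuous outputs, a small follow-up: rounding the predictions recovers the boolean gate values (assumes outp from the example above):

# Threshold the continuous predictions back to booleans
predicted_bits = [int(round(v)) for v in outp]
print(predicted_bits)  # ideally [0, 1, 1, 0]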
Example 6: print
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
# Excerpt from a larger script: clf, X_train/X_test, y_train/y_test, the `gt`
# helper module, tm (time) and the start time _t1b are defined in the omitted
# surrounding code.
# Optional histogram plot (disabled with a triple-quoted block):
"""
plt.rc('font', **font)
fig, axes = plt.subplots(nrows=1, ncols=1)
axes.set_title("Data: " + file)
axes.set_ylabel('Normalized distance count')
axes.set_xlabel(r'Distance ($\AA$)')
axes.hist(y_train, 150, color='blue', density=True, label='plot',
          linewidth=2, alpha=1.0)
plt.show()
"""
# Fit model
clf.fit(X_train, y_train)
# Compute and print the r^2 score
print(clf.score(X_test, y_test))
# Store predicted energies
Ecmp = clf.predict(X_test)
Ecmp = gt.hatokcal * Ecmp  # convert Hartree to kcal/mol
Eact = gt.hatokcal * y_test
# Compute RMSE in kcal/mol
rmse = gt.calculaterootmeansqrerror(Ecmp, Eact)
# End timer
_t1e = tm.time()
print("Computation complete. Time: " + "{:.4f}".format(_t1e - _t1b) + "s")
# Output model information
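The RMSE helper above comes from the source project's gt module; an equivalent computation in plain NumPy would look like the sketch below (an illustrative stand-in, assuming gt.calculaterootmeansqrerror computes a standard root-mean-square error):

import numpy as np

def rmse(predicted, actual):
    # Root-mean-square error between two equally shaped arrays
    predicted = np.asarray(predicted)
    actual = np.asarray(actual)
    return np.sqrt(np.mean((predicted - actual) ** 2))

# rmse(Ecmp, Eact) would then play the role of gt.calculaterootmeansqrerror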
Example 7: TSnew
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
#......... some code omitted here .........
# Excerpt from a larger class: the imports (numpy, pandas, os, random,
# sklearn's preprocessing and train_test_split) and the __init__ that sets
# self.df, self.header, self.repeatSometimes and self.dim_random_subset
# are in the omitted part above.
        self.df.iloc[:, k] = v

    def preprocess(self, removeColumnsWithMissingValues=False):
        print("DEB Preprocessing")
        m = self.df.values  # was .as_matrix(), removed in pandas 1.0
        # It is possible to encode enumerable features and to remove missing values.
        with open('enumerable_columns.txt') as f:  # e.g. "0,5,8"
            self.enumerable_columns = f.read()
        if ',' in self.enumerable_columns:
            self.enumerable_columns = list(map(int, self.enumerable_columns.split(',')))
        else:
            self.enumerable_columns = [int(self.enumerable_columns)]
        print("enumerable columns are: " + str(self.enumerable_columns))
        le = preprocessing.LabelEncoder()
        for col in self.enumerable_columns:
            # encode the enumerable column: A -> 0, B -> 1, ...
            self.df[self.header[col]] = le.fit_transform(self.df[self.header[col]])
        # Remove columns with missing values (NaN), even though this risks
        # shrinking the dataset too much.
        if removeColumnsWithMissingValues:
            for i in range(0, m.shape[1]):
                if True in m[:, i]:
                    self.df = numpy.delete(self.df, 0, i)  # delete column

    ############## MLP architecture #######################
    def createTrainingAndTestSet(self):
        print("DEB Create Training set. Using an 80-20% split")
        self.trainSet, self.testSet = train_test_split(self.df, test_size=0.20)

    # heart of the algorithm!
    def createTSnew(self):
        print("DEB Create TS new")
        for i in range(0, self.trainSet.shape[0]):
            for j in range(0, self.repeatSometimes):
                # choose a small random subset of features X_hat
                X_hat = [int(self.trainSet.shape[1] * random.random())
                         for _ in range(0, self.dim_random_subset)]
                # insert into TSnew the sample (x1 ... X_hat = 0 ... xk ; x1 ... xk)
                row = numpy.copy(self.trainSet.values[i, :])
                for feature in X_hat:  # X_hat holds the indices of the features to zero out
                    row[feature] = 0
                # append the corrupted row to TSnew_X (DataFrame.append was
                # removed in pandas 2.0, hence pandas.concat)
                self.TSnew_X = pandas.concat([self.TSnew_X, pandas.DataFrame(row.reshape(-1, len(row)))])
                copy = numpy.copy(self.trainSet.values[i, :])
                self.TSnew_Y = pandas.concat([self.TSnew_Y, pandas.DataFrame(copy.reshape(-1, len(copy)))])  # Y = x1...xk

    ############## Train & Predict ########################
    def train(self):
        print("DEB Training with TSnew")
        self.MLP = MLPRegressor(activation='relu', alpha=1e-05, batch_size='auto',
                                beta_1=0.9, beta_2=0.999, early_stopping=False,
                                epsilon=1e-08,
                                hidden_layer_sizes=len(self.TSnew_Y.columns),
                                learning_rate='constant', learning_rate_init=0.001,
                                max_iter=200, momentum=0.9, nesterovs_momentum=True,
                                power_t=0.5, random_state=1, shuffle=True,
                                solver='lbfgs', tol=0.0001, validation_fraction=0.1,
                                verbose=False, warm_start=False)
        self.MLP.fit(self.TSnew_X, self.TSnew_Y)

    def predict(self):
        print("DEB Test")
        testSetNew_X = pandas.DataFrame()
        testSetNew_Y = pandas.DataFrame()
        # prepare the test set - the same procedure as in createTSnew:
        if not os.path.isfile('testSetNew_X{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset)):
            for i in range(0, self.testSet.shape[0]):
                # choose a small random subset of features X_hat
                X_hat = [int(self.testSet.shape[1] * random.random())
                         for _ in range(0, self.dim_random_subset)]
                # insert into the new test set the sample (x1 ... X_hat = 0 ... xk ; x1 ... xk)
                row = numpy.copy(self.testSet.values[i, :])
                for feature in X_hat:
                    row[feature] = 0
                testSetNew_X = pandas.concat([testSetNew_X, pandas.DataFrame(row.reshape(-1, len(row)))])
                copy = numpy.copy(self.testSet.values[i, :])
                testSetNew_Y = pandas.concat([testSetNew_Y, pandas.DataFrame(copy.reshape(-1, len(copy)))])  # Y = x1...xk
            testSetNew_X.to_csv('testSetNew_X{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))
            testSetNew_Y.to_csv('testSetNew_Y{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))
        else:  # if the needed DataFrames have already been computed, simply load them from disk
            testSetNew_X = pandas.read_csv('testSetNew_X{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
            testSetNew_Y = pandas.read_csv('testSetNew_Y{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
        # predictions
        self.MLP.predict(testSetNew_X)
        print("Score of method (repetitions={}, subset={}): {}%".format(
            self.repeatSometimes, self.dim_random_subset,
            self.MLP.score(testSetNew_X, testSetNew_Y) * 100))

    ########################## Helper functions ####################
    def writeCSV(self):
        print("DEB WriteCSV")
        self.trainSet.to_csv('trainSet{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))
        self.testSet.to_csv('testSet{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))
        self.TSnew_X.to_csv('TSnew_X{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))
        self.TSnew_Y.to_csv('TSnew_Y{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset))

    def readCSV(self):
        print("DEB ReadCSV")
        self.trainSet = pandas.read_csv('trainSet{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
        self.testSet = pandas.read_csv('testSet{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
        self.TSnew_X = pandas.read_csv('TSnew_X{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
        self.TSnew_Y = pandas.read_csv('TSnew_Y{}-{}.csv'.format(self.repeatSometimes, self.dim_random_subset), index_col=0)
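The class above effectively trains a denoising-style reconstructor: the inputs are rows with a few randomly zeroed features, and the targets are the intact rows. A compact, self-contained sketch of that core idea (synthetic data; all names and sizes are illustrative):

import numpy as np
from sklearn.neural_network import MLPRegressor

rng = np.random.RandomState(0)
data = rng.rand(300, 8)  # 300 samples, 8 features

def corrupt(rows, n_zeroed=2):
    # zero out n_zeroed randomly chosen features in each row
    out = rows.copy()
    for row in out:
        row[rng.choice(rows.shape[1], size=n_zeroed, replace=False)] = 0
    return out

mlp = MLPRegressor(hidden_layer_sizes=(8,), solver='lbfgs',
                   random_state=1, max_iter=500)
mlp.fit(corrupt(data), data)  # learn to reconstruct the intact rows

# R^2 of the reconstruction on freshly corrupted copies
print("reconstruction R^2:", mlp.score(corrupt(data), data))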
Example 8: MLPRegressor
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
# Excerpt from a larger script: the geno and pheno arrays and the X_tr/X_te
# slices of geno are loaded in the omitted part.
import numpy as np
from scipy.stats import pearsonr
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPRegressor

Y_tr = pheno[:1000, 1:]  # slicing pheno
#Y_va = pheno[201:250, :]
Y_te = pheno[1001:, 1:]
diabetes_X_train = X_tr
diabetes_X_test = X_te
diabetes_y_train = Y_tr
diabetes_y_test = Y_te
# solver='lbfgs' replaces the pre-0.18 spelling algorithm='l-bfgs'
reg = MLPRegressor(hidden_layer_sizes=(1,), solver='lbfgs')
reg.fit(X_tr, Y_tr)
scores = cross_val_score(reg, geno[:, 1:], pheno[:, 1:], cv=10)
#Result_Y = np.zeros((249,1), dtype='float64')
Result_Y = reg.predict(X_te)
#Yte = np.array(Y_te, dtype=np.float64)
# note: pearsonr expects 1-D arrays; Result_Y and Y_te may need .ravel()
r_row, p_score = pearsonr(Result_Y, Y_te)
# The mean squared error
print("Residual sum of squares: %.2f"
      % np.mean((reg.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# R^2 score: 1 is perfect prediction
print('Variance score: %.2f' % reg.score(diabetes_X_test, diabetes_y_test))
print(Result_Y)
print(scores)
print(Result_Y.shape)
print(r_row)
print(p_score)
Example 9: getKaggleMNIST
# Required import: from sklearn.neural_network import MLPRegressor [as alias]
# Or: from sklearn.neural_network.MLPRegressor import score [as alias]
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
from sklearn.neural_network import MLPRegressor
from util import getKaggleMNIST
# get data
X, _, Xt, _ = getKaggleMNIST()
# create the model and train it to reproduce its own input
# (an autoencoder-style setup: X serves as both input and target)
model = MLPRegressor()
model.fit(X, X)
# test the model
print("Train R^2:", model.score(X, X))
print("Test R^2:", model.score(Xt, Xt))
Xhat = model.predict(X)
mse = ((Xhat - X)**2).mean()
print("Train MSE:", mse)
Xhat = model.predict(Xt)
mse = ((Xhat - Xt)**2).mean()
print("Test MSE:", mse)