當前位置: 首頁>>代碼示例>>Python>>正文


Python MLPRegressor.predict方法代碼示例

本文整理匯總了Python中sklearn.neural_network.MLPRegressor.predict方法的典型用法代碼示例。如果您正苦於以下問題:Python MLPRegressor.predict方法的具體用法?Python MLPRegressor.predict怎麽用?Python MLPRegressor.predict使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在sklearn.neural_network.MLPRegressor的用法示例。


在下文中一共展示了MLPRegressor.predict方法的14個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: mlp_bench

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
def mlp_bench(x_train, y_train, x_test, fh):
    """
    Forecasts using a simple MLP with 6 nodes in the hidden layer.

    One-step-ahead predictions are fed back into the input window to
    produce each subsequent step (recursive multi-step forecasting).

    :param x_train: train input data
    :param y_train: target values for training
    :param x_test: seed input window, shape (1, window_len); left unmodified
    :param fh: forecasting horizon (number of steps to forecast)
    :return: np.ndarray with the fh forecasts
    """
    # Work on a private copy: the original rolled/overwrote the caller's
    # x_test in place, an unintended side effect.
    x_test = np.array(x_test, copy=True)
    y_hat_test = []

    model = MLPRegressor(hidden_layer_sizes=6, activation='identity', solver='adam',
                         max_iter=100, learning_rate='adaptive', learning_rate_init=0.001,
                         random_state=42)
    model.fit(x_train, y_train)

    last_prediction = model.predict(x_test)[0]
    for _ in range(fh):
        y_hat_test.append(last_prediction)
        # Slide the window one step left and append the newest forecast.
        x_test[0] = np.roll(x_test[0], -1)
        x_test[0, -1] = last_prediction
        last_prediction = model.predict(x_test)[0]

    return np.asarray(y_hat_test)
開發者ID:KaterinaKou,項目名稱:M4-methods,代碼行數:27,代碼來源:ML_benchmarks.py

示例2: construct_train

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
def construct_train(train_length, **kwargs):
    """
    Train and test model with given input
    window and number of neurons in layer

    Relies on a module-level ``observations`` array (two columns, with the
    series values in column 1).  Python 2 code: uses ``xrange`` and
    integer ``/`` division.

    :param train_length: size of each training window; also the stride
    :param kwargs: optional ``hidden_layer`` forwarded to MLPRegressor
    :return: mean squared error of the stitched predictions
    """
    start_cur_postion = 0
    # Number of prediction windows and the stride between them.
    # NOTE(review): under Python 3 this `/` yields a float `steps` and the
    # xrange call below would fail -- Python 2 is assumed throughout.
    steps, steplen = observations.size/(2 * train_length), train_length

    if 'hidden_layer' in kwargs:
        network = MLPRegressor(hidden_layer_sizes=kwargs['hidden_layer'])
    else:
        network = MLPRegressor()

    quality = []

    # fit model - configure parameters
    # Both X and y are the same 1 x train_length slice of column 1.
    network.fit(observations[start_cur_postion:train_length][:, 1].reshape(1, train_length),
                observations[:, 1][start_cur_postion:train_length].reshape(1, train_length))

    parts = []

    # calculate predicted values
    # for each step add all predicted values to a list
    # (each iteration advances the window by `steplen`)
    # TODO: add some parallelism here
    for i in xrange(0, steps):
        parts.append(network.predict(observations[start_cur_postion:train_length][:, 1]))
        start_cur_postion += steplen
        train_length += steplen

    # estimate model quality using mean squared error
    result = np.array(parts).flatten().tolist()
    for valnum, value in enumerate(result):
        quality.append((value - observations[valnum][1])**2)

    return sum(quality)/len(quality)
開發者ID:AntonKorobkov,項目名稱:HW_3,代碼行數:37,代碼來源:homework_3_Korobkov.py

示例3: test_partial_fit_regression

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
def test_partial_fit_regression():
    """`partial_fit` should match a single `fit` call on regression data."""
    X, y = Xboston, yboston

    for momentum in (0, .9):
        batch_mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
                                 random_state=1, learning_rate_init=0.01,
                                 batch_size=X.shape[0], momentum=momentum)
        # Full-batch fit emits a convergence warning; record it silently.
        with warnings.catch_warnings(record=True):
            batch_mlp.fit(X, y)
        fit_pred = batch_mlp.predict(X)

        # Same configuration, trained via 100 explicit partial_fit steps.
        incremental_mlp = MLPRegressor(solver='sgd', activation='relu',
                                       learning_rate_init=0.01, random_state=1,
                                       batch_size=X.shape[0], momentum=momentum)
        for _ in range(100):
            incremental_mlp.partial_fit(X, y)
        partial_pred = incremental_mlp.predict(X)

        assert_almost_equal(fit_pred, partial_pred, decimal=2)
        score = incremental_mlp.score(X, y)
        assert_greater(score, 0.75)
開發者ID:aniryou,項目名稱:scikit-learn,代碼行數:26,代碼來源:test_mlp.py

示例4: __init__

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
class Ann:
    """Board evaluator backed by an sklearn MLPRegressor.

    Examples are buffered per game and flushed into a single
    `partial_fit` call by `entrenar()` (TD-lambda style targets).
    """

    def __init__(self):

        self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
        # Buffered board encodings and their target values.
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []
        # TD-lambda decay coefficient
        self.lambdaCoefficient = 0.9

    def evaluar(self, entrada):
        # Return the network's evaluation for the given board encoding.
        return self._nn.predict(entrada)

    def agregar_a_entrenamiento(self, tableros, resultado):
        """Add the boards of a finished game to the training buffer.

        :param tableros: list of (board encoding, estimated value) pairs
        :param resultado: game-outcome enum whose .value is the target
        """
        # Present the game from the final position backwards.
        tableros.reverse()
        for i in xrange(len(tableros)):
            tablero, valorEstimado = tableros[i][0], tableros[i][1]
            self._entradas_entrenamiento.append(tablero)
            # NOTE(review): `or True` makes this branch unconditional, so
            # the TD-lambda update below is dead code -- every board gets
            # the final game result as its target. Possibly deliberate
            # (file is named "no-usar"); confirm before changing.
            if i == 0 or True:
                self._salidas_esperadas_entrenamiento.append(resultado.value)
            else:
                valorAAprender = valorEstimado + self.lambdaCoefficient * (self._salidas_esperadas_entrenamiento[i-1] -
                    valorEstimado)
                self._salidas_esperadas_entrenamiento.append(valorAAprender)

    def entrenar(self):
        # Flush the buffered examples into one partial_fit step and reset.
        self._nn.partial_fit(self._entradas_entrenamiento, self._salidas_esperadas_entrenamiento)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []

    def almacenar(self):
        # Persist the trained network to self.path (set by cargar()).
        pickle.dump(self._nn, open(self.path,'wb'))

    def cargar(self, path, red):
        """Load a pickled network from *path*, or bootstrap *red* if absent."""
        self.path = path
        if os.path.isfile(path):
            self._nn = pickle.load(open(path, 'rb'))
        else:
            self._nn = red
            # Seed the fresh network with one example (empty board -> draw)
            # so predict() works before any real training happens.
            tableroVacio = ([EnumCasilla.EMPTY.value for _ in xrange(64)],0)
            self.agregar_a_entrenamiento([tableroVacio], EnumResultado.EMPATE)
            self.entrenar()
開發者ID:gsiriani,項目名稱:MAA,代碼行數:44,代碼來源:JugadorGrupoSimple-no-usar.py

示例5: MLPRegressor

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
#Example  with a Regressor using the scikit-learn library
# example for the XOr gate
from sklearn.neural_network import MLPRegressor 

X = [[0., 0.],[0., 1.], [1., 0.], [1., 1.]] # each one of the entries 00 01 10 11
y = [0, 1, 1, 0] # outputs for each one of the entries

# check http://scikit-learn.org/dev/modules/generated/sklearn.neural_network.MLPRegressor.html#sklearn.neural_network.MLPRegressor
#for more details
reg = MLPRegressor(hidden_layer_sizes=(5),activation='tanh', algorithm='sgd', alpha=0.001, learning_rate='constant',
                   max_iter=10000, random_state=None, verbose=False, warm_start=False, momentum=0.8, tol=10e-8, shuffle=False)

reg.fit(X,y)

outp =  reg.predict([[0., 0.],[0., 1.], [1., 0.], [1., 1.]])

print'Results:'
print '0 0 0:', outp[0]
print '0 1 1:', outp[1]
print '1 0 1:', outp[2]
print '1 1 0:', outp[0]
print'Score:', reg.score(X, y)
開發者ID:ithallojunior,項目名稱:NN_compare,代碼行數:24,代碼來源:xor_reg.py

示例6: QN

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
class QN(object):
    """Q-value network with a fixed-size experience-replay buffer."""
    def __init__(self, num_inputs, num_outputs):
        # num_inputs: dimensionality of the state vector.
        # num_outputs: number of discrete actions (one Q output each).
        self.nx = num_inputs
        self.ny = num_outputs
        # NOTE(review): `algorithm=` is the pre-0.18 sklearn spelling of
        # `solver=` -- this code targets an old scikit-learn release.
        # max_iter=1 together with warm_start=True makes each fit() call
        # perform a single SGD pass while keeping the learned weights.
        self.net = MLPRegressor(hidden_layer_sizes=(50, 10),
                                max_iter=1,
                                algorithm='sgd',
                                learning_rate='constant',
                                learning_rate_init=0.001,
                                warm_start=True,
                                momentum=0.9,
                                nesterovs_momentum=True
                                )

        self.initialize_network()

        # set experience replay
        self.mbsize = 128 # mini-batch size
        self.er_s = []      # states
        self.er_a = []      # action indices
        self.er_r = []      # rewards
        self.er_done = []   # episode-termination flags
        self.er_sp = []     # successor states

        self.er_size = 2000  # total buffer capacity, implemented as a ring
        self.whead = 0  # write head

    def initialize_network(self):
        # function to initialize network weights
        # Fit once on random data so the model is "fitted" (output layout
        # exists) before the first predict()/update call.
        xtrain = np.random.rand(256, self.nx)
        ytrain = 10 + np.random.rand(256, self.ny)
        self.net.fit(xtrain, ytrain)

    def update_network(self):
        # function updates network by sampling a mini-batch from the ER
        # Prepare train data (sampling with replacement)
        chosen = list(np.random.randint(len(self.er_s), size=min(len(self.er_s), self.mbsize)))
        Xtrain = np.asarray([self.er_s[i] for i in chosen])
        # calculate target (placeholder values, overwritten row by row below)
        target = np.random.rand(len(chosen), self.ny)

        for j, i in enumerate(chosen):
            # do a forward pass through s and sp
            Q_s = self.net.predict(self.er_s[i].reshape(1, -1))
            Q_sp = self.net.predict(self.er_sp[i].reshape(1, -1))
            target[j, :] = Q_s  # target initialized to current prediction

            if (self.er_done[i] == True):
                target[j, self.er_a[i]] = self.er_r[i]  # if end of episode, target is terminal reward
            else:
                # Bellman backup with discount factor 0.9.
                target[j, self.er_a[i]] = self.er_r[i] + 0.9 * max(max(Q_sp))  # Q_sp is list of list (why?)

        # fit the network
        self.net.fit(Xtrain, target)  # single step of SGD

    def append_memory(self, s, a, r, sp, done):
        # Store one (s, a, r, s', done) transition; once er_size items are
        # held, overwrite the oldest entry at the write head.
        if (len(self.er_s) < self.er_size):
            self.er_s.append(s)
            self.er_a.append(a)
            self.er_r.append(r)
            self.er_sp.append(sp)
            self.er_done.append(done)
            self.whead = (self.whead + 1) % self.er_size
        else:
            self.er_s[self.whead] = s
            self.er_a[self.whead] = a
            self.er_r[self.whead] = r
            self.er_sp[self.whead] = sp
            self.er_done[self.whead] = done
            self.whead = (self.whead+1) % self.er_size
開發者ID:aravindr93,項目名稱:RL-tasks,代碼行數:72,代碼來源:play_agent.py

示例7: print

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
axes.set_title("Data: " + file)
axes.set_ylabel('Normalized distant count')
axes.set_xlabel('Distance ($\AA$)')

axes.hist(y_train, 150, color='blue',normed=True, label='plot',linewidth=2,alpha=1.0)
plt.show()
"""

# Fit model
clf.fit(X_train, y_train)

# Compute and print r^2 score
print(clf.score(X_test, y_test))

# Store predicted energies
Ecmp = clf.predict(X_test)

# Convert from Hartree to kcal/mol -- gt.hatokcal is presumably the
# conversion factor from the project's `gt` helper module; confirm.
Ecmp = gt.hatokcal * (Ecmp)
Eact = gt.hatokcal * (y_test)

# Compute RMSE in kcal/mol
rmse = gt.calculaterootmeansqrerror(Ecmp, Eact)

# End timer (started in _t1b earlier in the full script)
_t1e = tm.time()
print("Computation complete. Time: " + "{:.4f}".format((_t1e - _t1b)) + "s")

# Output model information
print("RMSE: " + str(rmse))
# print(clf.coef_)
# print(clf.intercept_)

示例8: MLPRegressor

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
from sklearn.neural_network import MLPRegressor
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error



# Network-backup size prediction: fit an MLP on scheduling features and
# compare predicted against actual copy sizes, in-sample.
data = pd.read_csv('network_backup_dataset.csv')
train = data.loc[:,['WeekNumber','DayofWeek','BackupStartTime','WorkFlowID','FileName','BackupTime']]
target = data.loc[:,['SizeofBackup']]
# NOTE(review): `algorithm=` is the pre-0.18 sklearn spelling of `solver=`.
mlp = MLPRegressor(algorithm='sgd', hidden_layer_sizes=150,
                   max_iter=200, shuffle=False, random_state=1)

# Train on the full dataset and predict it back (in-sample evaluation).
mlp.fit(train, target)
prediction = mlp.predict(train)

plt.plot(prediction,label='Prediction',color='red')
plt.plot(target,label='Real Data',color='blue')
plt.title('Copy Size versus Time based on Neural Network Regression')
plt.xlabel('Time')
plt.ylabel('Copy Size')
plt.legend()
plt.show()

# Root-mean-squared error of the in-sample fit.
rmse = mean_squared_error(target.SizeofBackup,prediction)**0.5
print (rmse)

示例9: open

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
from datetime import datetime

# Timestamp used to report total script runtime at the end.
startTime = datetime.now()

fileTrain = open("fingerDataTrain.dat",'r')
fileVal = open("fingerDataVal.dat",'r')
trainingSet = np.loadtxt(fileTrain)
valSet = np.loadtxt(fileVal)
fileTrain.close()
fileVal.close()

# Columns 0-12 are inputs; columns 14 onwards are targets.
# NOTE(review): column 13 is skipped entirely ([:, :13] then [:, 14:]) --
# presumably deliberate, but worth confirming against the data format.
trainX = trainingSet[:,:13]
trainY = trainingSet[:,14:]
valX = valSet[:,:13]
valY = valSet[:,14:]

# Standardize each input column with the training set's mean/std and
# apply the identical transform to the validation inputs.
for i in range(trainX.shape[1]):
    m = trainX[:,i].mean()
    s = trainX[:,i].std()
    trainX[:,i] = (trainX[:,i]-m)/s
    valX[:,i] = (valX[:,i]-m)/s


# Fit a default MLP and compute mean squared validation error.
ann = MLPRegressor()
ann.fit(trainX,trainY)
sqError = ((ann.predict(valX)-valY)**2).mean()

# Scatter of actual vs. a fitted curve for one input/output pair.
plt.scatter(valX[:,1], valY[:,3],  color='black')
plt.plot(valX[:,1], ann.predict(valX)[:,3], color='blue', linewidth=3)

# Python 2 print statement: total elapsed runtime.
print datetime.now() - startTime

示例10: TSnew

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
class NeuralNetwork:
    ################# Fields #######################
    # dataset_filename: string - path to dataset
    # header: list - header of the dataset
    # enumerable_columns: list - the enumerable columns

    # df: matrix - data set
    # training_set: matrix - training set
    # test_set: matrix - test set

    # TSnew_X: matrix - training set of TSnew (see documentation)
    # TSnew_Y: matrix - training set of TSnew (see documentation)
    # dim_random_subset: int - number of features to set to 0 (see documentation)
    # repeatSometimes: int - number of for cicles (see documentation)

    def __init__(self, repeatSometimes = 2, dim_random_subset = 2):
        """Build and run the whole pipeline.

        NOTE(review): the constructor executes the entire workflow
        (load/preprocess or read cached CSVs, then train and predict)
        as a side effect of construction.

        :param repeatSometimes: number of augmentation passes for TSnew
        :param dim_random_subset: number of features zeroed per pass
        """
        # variables initialization
        self.enumerable_columns = []
        self.dataset_filename = ""
        self.header = []
        self.df = pandas.DataFrame()
        self.trainSet = pandas.DataFrame()
        self.testSet = pandas.DataFrame()
        self.TSnew_X = pandas.DataFrame()
        self.TSnew_Y = pandas.DataFrame()

        self.repeatSometimes = repeatSometimes
        self.dim_random_subset = dim_random_subset

        # This code really needs much time and therefore I save some computations
        # (cached CSVs are keyed by the two constructor parameters)
        if not os.path.isfile('trainSet{}-{}.csv'.format(repeatSometimes, dim_random_subset)):
            self.readDataset()
            self.discretization()
            self.preprocess()

            # creating TSnew
            self.createTrainingAndTestSet()
            self.createTSnew()

            # backup encoded sets
            self.writeCSV()
        else:
            self.readCSV()

        # training and test
        self.train()
        self.predict()


    def readDataset(self):
        """Read the CSV header from `header.txt` and the dataset path from
        `dataset.txt`, then load the dataset into self.df."""
        print("DEB Read dataset")

        with open('header.txt') as f:
            self.header = f.read().split(',')
            print(self.header)
        with open('dataset.txt') as f:
            self.dataset_filename = f.read()
            print(self.dataset_filename)
        self.df = pandas.read_csv(self.dataset_filename, names=self.header)
        print('Dataset with {} entries'.format(self.df.__len__()))

############# Preprocessing ##########################
    # helper function (should not be called from other functions)
    def discretize(self, column):
        print("DEB Discretize column " + column)
        sorted_col = sorted(column)
        l = len(column)
        n = int(numpy.floor(l / 2))
        if l % 2 == 0:
            median_1 = numpy.median(sorted_col[0:n])
            median_2 = numpy.median(sorted_col[n:])
        else:
            median_1 = numpy.median(sorted_col[0:(n + 1)])
            median_2 = numpy.median(sorted_col[(n + 1):])
        iqr = median_2 - median_1
        h = 2 * iqr * (1 / numpy.cbrt(l))
        if h > 0:
            bins_number = numpy.ceil((column.max() - column.min()) / h)
            new_col, bins = pandas.cut(column, bins_number, labels=False, retbins=True, include_lowest=False)
        else:
           new_col = column
           bins = []
        return new_col, bins

    # helper function (should not be called from other functions)
    def normalize(column):
        print("DEB Normalize")
        h = abs(column.min())
        new_col = column + h
        return new_col

    def discretization(self):
        print("DEB Discretization")
        replacements = {}
        bins = {}
        for i in range(0, self.df.shape[1]):  # for each feature
            bins[i] = []
            col = self.df.as_matrix()[:, i]
            flag_str = False
            flag_float = False
#.........這裏部分代碼省略.........
開發者ID:HerrAugust,項目名稱:EserciziUni,代碼行數:103,代碼來源:NeuralNetwork.py

示例11: __init__

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
class Ann:
    '''
        Implementation of, and interface to, the ANN functionality.
    '''
    def __init__(self):

        self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
        # Buffered training inputs/targets, flushed by entrenar().
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []
        # TD-lambda parameter
        self.lambdaCoefficient = 0.9

    def evaluar(self, entrada):
        '''
            Return the network's evaluation for the given input.
        '''
        return self._nn.predict(entrada)

    def agregar_a_entrenamiento(self, tableros, resultado):
        '''
            Add the data of a finished game to the training examples.
        '''

        # Present the game from the last position backwards
        tableros.reverse()
        for i in xrange(len(tableros)):
            # Board representation, estimated value
            tablero, valorEstimado = tableros[i][0], tableros[i][1]
            self._entradas_entrenamiento.append(tablero)
            # NOTE(review): `or True` makes this branch unconditional, so
            # the TD-lambda update below is dead code -- every board is
            # trained toward the final game result. Confirm intent.
            if i == 0 or True:
                # For the final position, the game result is the target
                self._salidas_esperadas_entrenamiento.append(resultado.value)
            else:
                # Target value prescribed by TD-lambda
                valorAAprender = valorEstimado + self.lambdaCoefficient * (
                    self._salidas_esperadas_entrenamiento[i - 1] - valorEstimado)
                self._salidas_esperadas_entrenamiento.append(valorAAprender)

    def entrenar(self):
        '''
            Train on the accumulated examples, then clear the buffers.
        '''
        self._nn.partial_fit(self._entradas_entrenamiento, self._salidas_esperadas_entrenamiento)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []

    def almacenar(self):
        '''
            Serialize and persist the network.
        '''
        pickle.dump(self._nn, open(self.path, 'wb'))

    def cargar(self, path, red):
        '''
            Deserialize a stored network, or initialise a fresh one.
        '''
        self.path = path
        if os.path.isfile(path):
            # If the given file exists, deserialize the network from it
            self._nn = pickle.load(open(path, 'rb'))
        else:
            # Otherwise, initialise the provided network
            self._nn = red
            # Bootstrap with one example (empty board -> draw) so that
            # predict() can be called before any real training.
            tableroVacio = ([EnumCasilla.EMPTY.value for _ in xrange(64)], 0)
            self.agregar_a_entrenamiento([tableroVacio], EnumResultado.EMPATE)
            self.entrenar()

示例12: MLPRegressor

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
# Split phenotypes: drop the first column, first 1000 rows for training,
# the remainder for testing.
# NOTE(review): row 1000 is skipped entirely ([:1000] then [1001:]) --
# looks like an off-by-one; confirm against the genotype split above.
Y_tr = pheno[:1000,1:]   #slicing pheno
#Y_va = pheno[201:250,:]
Y_te = pheno[1001:,1:]

diabetes_X_train = X_tr
diabetes_X_test = X_te
diabetes_y_train = Y_tr
diabetes_y_test = Y_te

# NOTE(review): `algorithm=` is the pre-0.18 sklearn spelling of `solver=`.
reg = MLPRegressor(hidden_layer_sizes=(1, ),algorithm='l-bfgs')
reg.fit(X_tr,Y_tr)

# 10-fold cross-validated scores on the full genotype/phenotype data.
scores = cross_val_score(reg,geno[:,1:],pheno[:,1:],cv=10)

#Result_Y = np.zeros((249,1), dtype='float64')
Result_Y = reg.predict(X_te)
#Yte = np.array(Y_te, dtype=np.float64)
# Pearson correlation between predictions and held-out phenotypes.
r_row,p_score = pearsonr(Result_Y,Y_te)

# The mean square error
print("Residual sum of squares: %.2f"
      % np.mean((reg.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % reg.score(diabetes_X_test, diabetes_y_test))
print(Result_Y)
print(scores)
print(Result_Y.shape)
print(r_row)
print(p_score)
開發者ID:godisboy,項目名稱:SNP-deep-learning,代碼行數:31,代碼來源:GBLUP.py

示例13: getKaggleMNIST

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
# MLPRegressor used as an autoencoder: the network is trained to
# reproduce its own input (targets == inputs).
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future


import numpy as np
from sklearn.neural_network import MLPRegressor
from util import getKaggleMNIST



# Load Kaggle MNIST pixels; labels are ignored for the autoencoding task.
X, _, Xt, _ = getKaggleMNIST()

# Fit the network to map each image back onto itself.
autoencoder = MLPRegressor()
autoencoder.fit(X, X)

# R^2 of the reconstruction on the train and test sets.
print("Train R^2:", autoencoder.score(X, X))
print("Test R^2:", autoencoder.score(Xt, Xt))

# Mean squared reconstruction error on both sets.
for label, data in (("Train MSE:", X), ("Test MSE:", Xt)):
    reconstruction = autoencoder.predict(data)
    print(label, ((reconstruction - data) ** 2).mean())

示例14: KNeighborsRegressor

# 需要導入模塊: from sklearn.neural_network import MLPRegressor [as 別名]
# 或者: from sklearn.neural_network.MLPRegressor import predict [as 別名]
# --- KNN baseline: grid-search the neighbour count, then refit ---
KNN = KNeighborsRegressor()
knn_param_grid = {'n_neighbors':[3,10]}
knn_grid = model_selection.GridSearchCV(KNN, knn_param_grid, cv=10, n_jobs=25, verbose=1, scoring='neg_mean_squared_error')
knn_grid.fit(X_train, y_train)
print(' Best  Params:' + str(knn_grid.best_params_))
# Refit a fresh KNN with the chosen neighbour count.
KNN = KNeighborsRegressor(n_neighbors=10)
KNN.fit(X_train, y_train)
y_predict_knn=KNN.predict(X_test)
# Mean absolute error; 9467 is presumably the test-set size -- confirm.
mae_knn=(np.abs(y_predict_knn-y_test)).sum()/9467
joblib.dump(KNN, 'KNN.model')
print(mae_knn)
#mlp
from sklearn.neural_network import MLPRegressor
MLP = MLPRegressor(hidden_layer_sizes=(300, 200,200),max_iter=100,activation='relu')
MLP.fit(X_train, y_train)
y_predict_MLP=MLP.predict(X_test)
mae_MLP=(np.abs(y_predict_MLP-y_test)).sum()/9467
joblib.dump(MLP, 'MLP.model')
print(mae_MLP)
#xgb
import xgboost  as xgb
x_regress = xgb.XGBRegressor(max_depth=20,n_estimators =5000)
x_regress_param_grid = {'max_depth': [5,20]}
# NOTE(review): this GridSearchCV is constructed but never fitted; the
# plain x_regress below is trained and saved instead -- confirm intent.
x_regress_grid = model_selection.GridSearchCV(x_regress, x_regress_param_grid, cv=10, n_jobs=25, verbose=1, scoring='neg_mean_squared_error')
x_regress.fit(X_train, y_train)
joblib.dump(x_regress, 'x_regress_grid.model')
y_predict_xgb=x_regress.predict(X_test)

mae_xgb=(np.abs(y_predict_xgb-y_test)).sum()/9467
# Model ensembling
# simple average


注:本文中的sklearn.neural_network.MLPRegressor.predict方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。