This page collects typical usage examples of the Python class sklearn.neural_network.MLPRegressor. If you are wondering what MLPRegressor is for and how to use it, the curated class examples below may help.
The following shows 15 code examples of the MLPRegressor class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples. Unless shown otherwise, the snippets below generally assume "from sklearn.neural_network import MLPRegressor" and "import numpy as np".
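Before the examples, here is a minimal, self-contained sketch of the basic fit/predict cycle. It is not drawn from any of the projects below; the synthetic data and parameter values are illustrative assumptions:

import numpy as np
from sklearn.neural_network import MLPRegressor

# Synthetic regression data: 100 samples, 3 features.
rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([1.5, -2.0, 1.0]) + 0.1 * rng.randn(100)

# One hidden layer of 20 units; 'adam' is the default solver.
model = MLPRegressor(hidden_layer_sizes=(20,), max_iter=2000, random_state=0)
model.fit(X, y)
print(model.score(X, y))   # R^2 on the training data
print(model.predict(X[:3]))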
Example 1: regression
def regression(N, P):
    assert len(N) == len(P)
    # 'algorithm' was renamed to 'solver' in scikit-learn 0.18.
    clf = MLPRegressor(hidden_layer_sizes=(15,), activation='relu',
                       solver='adam', alpha=0.0001)
    clf.fit(N, P)
    return clf
Example 2: _create_first_population
def _create_first_population(self):
    self._current_population = []
    for _ in range(self._n_individuals):
        mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture,
                           alpha=10**-10, max_iter=1)
        # A single dummy fit allocates coefs_/intercepts_ so they can be
        # overwritten later; the output activation is then forced to softmax.
        mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
        mlp.out_activation_ = 'softmax'
        self._current_population.append([mlp, 0])
Example 3: construct_train
def construct_train(train_length, **kwargs):
    """
    Train and test a model with the given input
    window and number of neurons in the layer.
    """
    start_cur_position = 0
    # 'observations' is a module-level array of (time, value) rows.
    steps, steplen = observations.size // (2 * train_length), train_length
    if 'hidden_layer' in kwargs:
        network = MLPRegressor(hidden_layer_sizes=kwargs['hidden_layer'])
    else:
        network = MLPRegressor()
    quality = []
    # fit model - configure parameters
    network.fit(observations[start_cur_position:train_length][:, 1].reshape(1, train_length),
                observations[:, 1][start_cur_position:train_length].reshape(1, train_length))
    parts = []
    # calculate predicted values
    # for each step add all predicted values to a list
    # TODO: add some parallelism here
    for i in range(0, steps):
        # reshape(1, -1): modern scikit-learn requires a 2-D input for predict.
        parts.append(network.predict(
            observations[start_cur_position:train_length][:, 1].reshape(1, -1)))
        start_cur_position += steplen
        train_length += steplen
    # estimate model quality using the mean squared error
    result = np.array(parts).flatten().tolist()
    for valnum, value in enumerate(result):
        quality.append((value - observations[valnum][1])**2)
    return sum(quality) / len(quality)
Example 4: mlp_bench
def mlp_bench(x_train, y_train, x_test, fh):
    """
    Forecasts using a simple MLP with 6 nodes in the hidden layer.
    :param x_train: train input data
    :param y_train: target values for training
    :param x_test: test data
    :param fh: forecasting horizon
    :return: array of forecasts for the next fh steps
    """
    y_hat_test = []
    model = MLPRegressor(hidden_layer_sizes=6, activation='identity', solver='adam',
                         max_iter=100, learning_rate='adaptive', learning_rate_init=0.001,
                         random_state=42)
    model.fit(x_train, y_train)
    last_prediction = model.predict(x_test)[0]
    for i in range(0, fh):
        y_hat_test.append(last_prediction)
        # Slide the input window one step and append the latest prediction.
        x_test[0] = np.roll(x_test[0], -1)
        x_test[0, (len(x_test[0]) - 1)] = last_prediction
        last_prediction = model.predict(x_test)[0]
    return np.asarray(y_hat_test)
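As a usage sketch for mlp_bench (the series, window length, and horizon below are assumptions, not taken from the original project), x_test must be a 2-D array holding the most recent input window, which the function rolls forward one step per predicted value:

# Build sliding windows over a hypothetical series.
series = np.sin(np.linspace(0, 20, 200))
window = 6
X = np.array([series[i:i + window] for i in range(len(series) - window)])
y = series[window:]

x_test = X[-1:].copy()  # shape (1, window); copied because mlp_bench mutates it
forecast = mlp_bench(X[:-1], y[:-1], x_test, fh=12)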
Example 5: test_multioutput_regression
def test_multioutput_regression():
    # Test that multi-output regression works as expected.
    # From scikit-learn's own test suite; make_regression and assert_greater
    # come from the surrounding test module.
    X, y = make_regression(n_samples=200, n_targets=5)
    mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
                       random_state=1)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.9)
Example 6: test_lbfgs_regression
def test_lbfgs_regression():
    # Test lbfgs on the Boston dataset, a regression problem.
    # Xboston, yboston and ACTIVATION_TYPES are fixtures of the test module.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        # 'algorithm' was renamed to 'solver' in scikit-learn 0.18.
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        assert_greater(mlp.score(X, y), 0.95)
Example 7: GetOptimalCLF2
def GetOptimalCLF2(train_x, train_y, rand_starts=8):
    '''
    Gets the optimal CLF function based on fixed settings

    Parameters
    ------------------------
    train_x - np.array
        Training feature vectors
    train_y - np.array
        Training label vectors
    rand_starts - int
        Number of random starts to do
        Default - 8 for 95% confidence and best 30%

    Returns
    ------------------------
    max_clf - sklearn function
        Optimal trained artificial neural network
    '''
    #### Get number of feature inputs of training vector
    n_input = train_x.shape[1]
    #### Set initial loss value
    min_loss = 1e10
    #### Perform number of trainings according to random start set
    for i in range(rand_starts):
        #### Print current status
        print("Iteration number {}".format(i + 1))
        #### Initialize ANN network
        clf = MLPRegressor(hidden_layer_sizes=(int(round(2*np.sqrt(n_input), 0)), 1),
                           activation='logistic', solver='sgd',
                           learning_rate='adaptive', max_iter=100000000, tol=1e-10,
                           early_stopping=True, validation_fraction=1/3.)
        #### Fit data
        clf.fit(train_x, train_y)
        #### Get current loss
        cur_loss = clf.loss_
        #### Save current clf if loss is minimum
        if cur_loss < min_loss:
            #### Set min_loss to a new value
            min_loss = cur_loss
            #### Set max_clf to new value
            max_clf = clf
    return max_clf
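The "8 for 95% confidence and best 30%" default in the docstring follows the standard random-restart argument: if a single run lands in the best 30% of local optima with probability 0.3, the chance that at least one of eight independent runs does so is 1 - 0.7**8 ≈ 0.94, i.e. roughly 95%.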
Example 8: MLP_Regressor
def MLP_Regressor(train_x, train_y):
    clf = MLPRegressor(alpha=1e-05,
                       batch_size='auto', beta_1=0.9, beta_2=0.999, early_stopping=False,
                       epsilon=1e-08, hidden_layer_sizes=(8, 8), learning_rate='constant',
                       learning_rate_init=0.01, max_iter=500, momentum=0.9,
                       nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                       tol=0.0001, validation_fraction=0.1, verbose=False,
                       warm_start=False)
    clf.fit(train_x, train_y)
    # accuracy_score is meant for classifiers; clf.score (R^2) is the
    # appropriate metric for a regressor.
    #score = metrics.accuracy_score(clf.predict((train_x)), (train_y))
    #print(score)
    return clf
Example 9: test_lbfgs_regression
def test_lbfgs_regression():
    # Test lbfgs on the Boston dataset, a regression problem.
    # A newer revision of Example 6, already using the 'solver' parameter.
    X = Xboston
    y = yboston
    for activation in ACTIVATION_TYPES:
        mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
                           max_iter=150, shuffle=True, random_state=1,
                           activation=activation)
        mlp.fit(X, y)
        if activation == 'identity':
            assert_greater(mlp.score(X, y), 0.84)
        else:
            # Non-linear models perform much better than a linear bottleneck:
            assert_greater(mlp.score(X, y), 0.95)
Example 10: __init__
def __init__(self):
    self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
    self._entradas_entrenamiento = []
    self._salidas_esperadas_entrenamiento = []
    # TD-lambda parameter
    self.lambdaCoefficient = 0.9
Example 11: __init__
def __init__(self, num_inputs, num_outputs):
    self.nx = num_inputs
    self.ny = num_outputs
    # 'algorithm' was renamed to 'solver' in scikit-learn 0.18.
    self.net = MLPRegressor(hidden_layer_sizes=(50, 10),
                            max_iter=1,
                            solver='sgd',
                            learning_rate='constant',
                            learning_rate_init=0.001,
                            warm_start=True,
                            momentum=0.9,
                            nesterovs_momentum=True)
    self.initialize_network()
    # set experience replay
    self.mbsize = 128  # mini-batch size
    self.er_s = []
    self.er_a = []
    self.er_r = []
    self.er_done = []
    self.er_sp = []
    self.er_size = 2000  # total size of mb, implement as queue
    self.whead = 0  # write head
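The constructor only initializes the replay buffers; a training step would sample a random mini-batch from them. Below is a minimal sketch of such a step, assuming a Q-learning setup where the network outputs one value per action; the function name, the discount factor gamma, and the target computation are assumptions, not part of the original class, and self.net must already have been fitted once (as initialize_network with warm_start=True suggests):

import numpy as np

def replay_step(self, gamma=0.99):
    """One assumed Q-learning update from the experience-replay buffers."""
    n = len(self.er_s)
    if n < self.mbsize:
        return
    idx = np.random.choice(n, self.mbsize, replace=False)
    s = np.array([self.er_s[i] for i in idx])
    a = np.array([self.er_a[i] for i in idx])          # integer action indices
    r = np.array([self.er_r[i] for i in idx])
    sp = np.array([self.er_sp[i] for i in idx])
    done = np.array([self.er_done[i] for i in idx], dtype=float)
    # Current Q-values, with the taken action replaced by the one-step TD target;
    # terminal transitions (done == 1) bootstrap nothing.
    q = self.net.predict(s)
    q[np.arange(self.mbsize), a] = r + gamma * (1.0 - done) * self.net.predict(sp).max(axis=1)
    # warm_start=True and max_iter=1 make fit() behave like a single SGD epoch.
    self.net.fit(s, q)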
Example 12: train_model
def train_model(x_train, y_train, alpha=1e-3, hid_layers=(512,), max_iter=100):
    """
    Train model on training data.
    :param x_train: training examples
    :param y_train: target variables
    :param alpha: L2 regularization coefficient
    :param hid_layers: hidden layer sizes
    :param max_iter: maximum number of iterations in L-BFGS optimization
    :return: a model trained with a neural network
    """
    # The original passed solver='lbgfs', a typo; the valid name is 'lbfgs'.
    nn_model = MLPRegressor(solver='lbfgs', hidden_layer_sizes=hid_layers,
                            alpha=alpha, max_iter=max_iter,
                            activation="relu", random_state=1)
    nn_model.fit(x_train, y_train)
    return nn_model
Example 13: train
def train(self):
    print("DEB Training with TSnew")
    self.MLP = MLPRegressor(activation='relu', alpha=1e-05, batch_size='auto', beta_1=0.9,
                            beta_2=0.999, early_stopping=False, epsilon=1e-08,
                            hidden_layer_sizes=len(self.TSnew_Y.columns), learning_rate='constant',
                            learning_rate_init=0.001, max_iter=200, momentum=0.9,
                            nesterovs_momentum=True, power_t=0.5, random_state=1, shuffle=True,
                            solver='lbfgs', tol=0.0001, validation_fraction=0.1, verbose=False,
                            warm_start=False)
    self.MLP.fit(self.TSnew_X, self.TSnew_Y)
Example 14: __init__
import os
import pickle

# EnumCasilla and EnumResultado are project-specific enums.
class Ann:
    def __init__(self):
        self._nn = MLPRegressor(hidden_layer_sizes=(10,), verbose=False, warm_start=True)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []
        self.lambdaCoefficient = 0.9

    def evaluar(self, entrada):
        return self._nn.predict(entrada)

    def agregar_a_entrenamiento(self, tableros, resultado):
        tableros.reverse()
        for i in range(len(tableros)):
            tablero, valorEstimado = tableros[i][0], tableros[i][1]
            self._entradas_entrenamiento.append(tablero)
            # Note: 'or True' makes this condition always true, so the
            # TD-lambda update in the else branch is never reached.
            if i == 0 or True:
                self._salidas_esperadas_entrenamiento.append(resultado.value)
            else:
                valorAAprender = valorEstimado + self.lambdaCoefficient * (
                    self._salidas_esperadas_entrenamiento[i - 1] - valorEstimado)
                self._salidas_esperadas_entrenamiento.append(valorAAprender)

    def entrenar(self):
        self._nn.partial_fit(self._entradas_entrenamiento, self._salidas_esperadas_entrenamiento)
        self._entradas_entrenamiento = []
        self._salidas_esperadas_entrenamiento = []

    def almacenar(self):
        pickle.dump(self._nn, open(self.path, 'wb'))

    def cargar(self, path, red):
        self.path = path
        if os.path.isfile(path):
            self._nn = pickle.load(open(path, 'rb'))
        else:
            self._nn = red
            tableroVacio = ([EnumCasilla.EMPTY.value for _ in range(64)], 0)
            self.agregar_a_entrenamiento([tableroVacio], EnumResultado.EMPATE)
            self.entrenar()
Example 15: _create_new_nn
def _create_new_nn(self, weights, biases):
    mlp = MLPRegressor(hidden_layer_sizes=self._nn_architecture, alpha=10**-10, max_iter=1)
    # Dummy fit to allocate the parameter arrays, which are then overwritten.
    mlp.fit([np.random.randn(self._n_features)], [np.random.randn(self._n_actions)])
    mlp.coefs_ = weights
    mlp.intercepts_ = biases
    mlp.out_activation_ = 'softmax'
    return mlp
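Examples 2 and 15 follow the same neuroevolution pattern: a throwaway fit() call allocates coefs_ and intercepts_, which are then set by hand. A sketch of how _create_new_nn might be paired with mutation of a parent network follows; the helper name and the noise scale sigma are assumptions:

import numpy as np

def _mutate(self, parent_mlp, sigma=0.1):
    # Copy the parent's parameters and perturb each array with Gaussian noise.
    weights = [w + sigma * np.random.randn(*w.shape) for w in parent_mlp.coefs_]
    biases = [b + sigma * np.random.randn(*b.shape) for b in parent_mlp.intercepts_]
    return self._create_new_nn(weights, biases)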