This page collects typical code examples of the Python method sklearn.neural_network.MLPClassifier.partial_fit. If you are wondering what MLPClassifier.partial_fit does, how to call it, or what real uses of it look like, the curated examples below should help. You can also read more about the class it belongs to, sklearn.neural_network.MLPClassifier.
The following shows 10 code examples of MLPClassifier.partial_fit, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
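Before the examples, here is a minimal sketch of the pattern they all rely on (toy data and parameters chosen purely for illustration): the first call to partial_fit must declare every class that will ever appear via the classes argument; later calls continue training from the current weights and may omit it.

import numpy as np
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(hidden_layer_sizes=(10,), random_state=0)

# First batch: `classes` must list every label the model will ever see.
clf.partial_fit(np.array([[0., 0.], [1., 1.]]), np.array([0, 1]), classes=[0, 1])
# Later batches: weights are updated incrementally; `classes` can be omitted.
clf.partial_fit(np.array([[2., 2.], [3., 3.]]), np.array([1, 1]))

print(clf.predict(np.array([[0.5, 0.5]])))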
Example 1: init_Q
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def init_Q():
    # make some dummy training set
    board = init_board()
    board_vec = board2vec(board)
    X = np.array([board_vec])
    y = [(BOARD_SIZE - 1)**2]
    board_vec = np.invert(board_vec)
    X = np.append(X, np.array([board_vec]), axis=0)
    y.append(0)
    edges = get_potential_moves(board)  # all the edges, since the board is empty
    for edge in edges:
        i = edge2ind(edge)
        board_vec[i] = False
        X = np.append(X, np.array([board_vec]), axis=0)
        y.append(check_surrounding_squares(board, edge, 0))
        board_vec[i] = True
    Q = MLPClassifier(warm_start=True,
                      hidden_layer_sizes=(BOARD_SIZE, 10 * BOARD_SIZE, BOARD_SIZE),
                      tol=1e-10,
                      )
    # Q = DecisionTreeRegressor()
    # shf = range(len(y))
    # for j in xrange(100):
    #     random.shuffle(shf)
    #     Xshf = [X[i] for i in shf]
    #     yshf = [y[i] for i in shf]
    triedy = range((BOARD_SIZE - 1)**2 + 1)
    Q.partial_fit(np.repeat(X, 100, axis=0), np.repeat(y, 100, axis=0), classes=triedy)
    print(Q.predict(X))
    return Q
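Two details worth noting here: the Q-values are modeled as discrete classes (0 through (BOARD_SIZE-1)**2), which is why a classifier rather than a regressor is used, and np.repeat duplicates the small training set 100 times, presumably so that the single pass a partial_fit call makes over its input covers many more minibatch updates.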
Example 2: test_partial_fit_classes_error
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_partial_fit_classes_error():
    # Tests that passing different classes to partial_fit raises an error
    X = [[3, 2]]
    y = [0]
    clf = MLPClassifier(solver='sgd')
    clf.partial_fit(X, y, classes=[0, 1])
    assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
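Once partial_fit has been primed with a class list, every later call must use the same set (or omit classes entirely); passing a different set, as the second call above does, raises a ValueError.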
Example 3: test_partial_fit_unseen_classes
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_partial_fit_unseen_classes():
    # Non-regression test for bug 6994
    # Tests for labeling errors in partial fit
    clf = MLPClassifier(random_state=0)
    clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
                    classes=["a", "b", "c", "d"])
    clf.partial_fit([[4]], ["d"])
    assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
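The point of this non-regression test is that "d" is declared in classes on the first call but only actually appears in a later batch; before the fix for bug 6994, that mismatch could corrupt the label mapping.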
Example 4: main
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def main():
    enc = OneHotEncoder(n_values=[7, 7, 7, 7, 7, 7])
    burgers = pandas.read_hdf('../../../machine/data.h5', 'df')
    X = burgers.drop(['output'], axis=1)
    y = burgers['output']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
    clf = MLPClassifier(solver='adam', activation='relu',
                        hidden_layer_sizes=64,
                        verbose=False,
                        max_iter=10000,
                        tol=1e-9,
                        random_state=1)
    classes = numpy.unique(y)
    i = 0
    while True:
        burgers = X_train[y_train == 1]
        notburgers = X_train[y_train == 0]
        # Pull 32 samples from training data,
        # where half the samples come from each class
        sample = burgers.sample(16).join(y_train)
        sample = sample.append(notburgers.sample(16).join(y_train))
        sample_X_train = sample.drop(['output'], axis=1)
        sample_y_train = sample['output']
        sample_X_train_categoricals = sample_X_train[column_names]
        tX_sample_train_categoricals = enc.fit_transform(sample_X_train_categoricals)
        clf.partial_fit(tX_sample_train_categoricals, sample_y_train.as_matrix().astype(int), classes=classes)
        if (i % 5) == 0:
            print(i)
            X_test_categoricals = X_test[column_names]
            tX_test_categoricals = enc.fit_transform(X_test_categoricals)
            prediction = clf.predict(tX_test_categoricals)
            print_eval(y_test, prediction)
            print(classification_report(y_test, prediction))
        i += 1
        X_train_categoricals = X_train[column_names]
        tX_train_categoricals = enc.fit_transform(X_train_categoricals)
        probs = clf.predict_proba(tX_train_categoricals)
        # Store the probabilities
        X_train_copy = X_train.copy()
        X_train_copy['prob_notburger'] = probs[:, 0]
        X_train_copy['prob_burger'] = probs[:, 1]
        X_train_categoricals = X_train_copy[column_names]
        tX_train_categoricals = enc.fit_transform(X_train_categoricals)
        prediction = clf.predict(tX_train_categoricals)
        pickle.dump(clf, open("clf.pkl.tmp", "wb"))
        os.rename("clf.pkl.tmp", "clf.pkl")
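A few caveats for this example: column_names and print_eval are module-level names from the original project, and it uses APIs from its era, such as OneHotEncoder(n_values=...), DataFrame.as_matrix(), and DataFrame.append(), all of which have since been removed from scikit-learn and pandas. Note also that the encoder is re-fit on every batch; that only yields a consistent encoding because n_values pins every feature's value range. A rough modern equivalent of the encoding step (a sketch under those assumptions, not part of the original) fits once and only transforms afterwards:

from sklearn.preprocessing import OneHotEncoder

# Fixed category lists play the role of n_values=[7]*6: each of the six
# features takes integer values 0..6, so transform() is stable across batches.
enc = OneHotEncoder(categories=[list(range(7))] * 6)
enc.fit([[0, 0, 0, 0, 0, 0]])                  # any valid row works; categories are fixed
encoded = enc.transform([[1, 2, 3, 4, 5, 6]])  # sparse matrix, fine for partial_fit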
Example 5: test_verbose_sgd
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_verbose_sgd():
    # Test verbose.
    X = [[3, 2], [1, 6]]
    y = [1, 0]
    clf = MLPClassifier(algorithm='sgd', max_iter=2, verbose=10,
                        hidden_layer_sizes=2)
    old_stdout = sys.stdout
    sys.stdout = output = StringIO()
    clf.fit(X, y)
    clf.partial_fit(X, y)
    sys.stdout = old_stdout
    assert 'Iteration' in output.getvalue()
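Examples 5 and 8 pass algorithm='sgd', the parameter name used by pre-release development versions of scikit-learn; in released versions (0.18 onwards) the same option is spelled solver='sgd', as Example 6, an updated copy of the same test, shows.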
Example 6: test_verbose_sgd
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_verbose_sgd():
    # Test verbose.
    X = [[3, 2], [1, 6]]
    y = [1, 0]
    clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
                        hidden_layer_sizes=2)
    old_stdout = sys.stdout
    sys.stdout = output = StringIO()
    with ignore_warnings(category=ConvergenceWarning):
        clf.fit(X, y)
        clf.partial_fit(X, y)
    sys.stdout = old_stdout
    assert 'Iteration' in output.getvalue()
Example 7: test_multilabel_classification
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
    # test fit method
    X, y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
                        max_iter=150, random_state=0, activation='logistic',
                        learning_rate_init=0.2)
    mlp.fit(X, y)
    assert_equal(mlp.score(X, y), 1)
    # test partial fit method
    mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
                        random_state=0, activation='logistic', alpha=1e-5,
                        learning_rate_init=0.2)
    for i in range(100):
        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
    assert_greater(mlp.score(X, y), 0.9)
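For multilabel data, y is an indicator matrix, so the values passed as classes are the label column indices (0 through 4 for the five labels generated here).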
Example 8: test_partial_fit_classification
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_partial_fit_classification():
    # Test partial_fit on classification.
    # `partial_fit` should yield the same results as `fit` for binary and
    # multi-class classification.
    for X, y in classification_datasets:
        X = X
        y = y
        mlp = MLPClassifier(algorithm='sgd', max_iter=100, random_state=1,
                            tol=0, alpha=1e-5, learning_rate_init=0.2)
        mlp.fit(X, y)
        pred1 = mlp.predict(X)
        mlp = MLPClassifier(algorithm='sgd', random_state=1, alpha=1e-5,
                            learning_rate_init=0.2)
        for i in range(100):
            mlp.partial_fit(X, y, classes=np.unique(y))
        pred2 = mlp.predict(X)
        assert_array_equal(pred1, pred2)
        assert_greater(mlp.score(X, y), 0.95)
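Calling partial_fit 100 times over the full dataset mirrors the fit call configured with max_iter=100 and tol=0 (so it never stops early), which is why the two models are expected to produce identical predictions.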
Example 9: test_multilabel_classification
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_multilabel_classification():
    # Test that multi-label classification works as expected.
    # test fit method
    X, y = make_multilabel_classification(n_samples=50, random_state=0,
                                          return_indicator=True)
    mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
                        max_iter=150, random_state=0, activation='logistic',
                        learning_rate_init=0.2)
    mlp.fit(X, y)
    assert_greater(mlp.score(X, y), 0.97)
    # test partial fit method
    mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
                        random_state=0, activation='logistic', alpha=1e-5,
                        learning_rate_init=0.2)
    for i in range(100):
        mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
    assert_greater(mlp.score(X, y), 0.9)
    # Make sure early stopping still works now that splitting is stratified by
    # default (it is disabled for multilabel classification)
    mlp = MLPClassifier(early_stopping=True)
    mlp.fit(X, y).predict(X)
Example 10: test_fit
# Required import: from sklearn.neural_network import MLPClassifier [as alias]
# Or: from sklearn.neural_network.MLPClassifier import partial_fit [as alias]
def test_fit():
    # Test that the algorithm solution is equal to a worked out example.
    X = np.array([[0.6, 0.8, 0.7]])
    y = np.array([0])
    mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
                        activation='logistic', random_state=1, max_iter=1,
                        hidden_layer_sizes=2, momentum=0)
    # set weights
    mlp.coefs_ = [0] * 2
    mlp.intercepts_ = [0] * 2
    mlp.n_outputs_ = 1
    mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
    mlp.coefs_[1] = np.array([[0.1], [0.2]])
    mlp.intercepts_[0] = np.array([0.1, 0.1])
    mlp.intercepts_[1] = np.array([1.0])
    mlp._coef_grads = [] * 2
    mlp._intercept_grads = [] * 2
    # Initialize parameters
    mlp.n_iter_ = 0
    mlp.learning_rate_ = 0.1
    # Compute the number of layers
    mlp.n_layers_ = 3
    # Pre-allocate gradient matrices
    mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
    mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
    mlp.out_activation_ = 'logistic'
    mlp.t_ = 0
    mlp.best_loss_ = np.inf
    mlp.loss_curve_ = []
    mlp._no_improvement_count = 0
    mlp._intercept_velocity = [np.zeros_like(intercepts) for
                               intercepts in
                               mlp.intercepts_]
    mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
                          mlp.coefs_]
    mlp.partial_fit(X, y, classes=[0, 1])
    # Manually worked out example
    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
    #    = 0.679178699175393
    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
    #    = 0.574442516811659
    # o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
    #    = 0.7654329236196236
    # d21 = -(0 - 0.765) = 0.765
    # d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
    # d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
    # W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
    # W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
    # W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
    # W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
    # W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
    # W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
    # W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
    # b1grad1 = d11 = 0.01667
    # b1grad2 = d12 = 0.0374
    # b2grad = d21 = 0.765
    # W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
    #      [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
    #      [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
    #      0.096008], [0.4939998, -0.002244]]
    # W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
    #      [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
    # b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
    #    = [0.098333, 0.09626]
    # b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
    assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
                                                 [0.2956664, 0.096008],
                                                 [0.4939998, -0.002244]]),
                        decimal=3)
    assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
                        decimal=3)
    assert_almost_equal(mlp.intercepts_[0],
                        np.array([0.098333, 0.09626]), decimal=3)
    assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
    # Testing output
    # h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
    #      0.7 * 0.4939998 + 0.098333) = 0.677
    # h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
    #      0.7 * -0.002244 + 0.09626) = 0.572
    # o1 = h * W2 + b21 = 0.677 * 0.04706 +
    #      0.572 * 0.154089 + 0.9235 = 1.043
    # prob = sigmoid(o1) = 0.739
    assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
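As a sanity check on the hand-worked forward pass in Example 10, the short numpy snippet below (written for this page, not part of the original test) reproduces the hidden activations and output probability before the weight update:

import numpy as np

def g(z):
    # logistic activation, matching activation='logistic' in the test
    return 1.0 / (1.0 + np.exp(-z))

x = np.array([0.6, 0.8, 0.7])
W1 = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0.0]])
b1 = np.array([0.1, 0.1])
W2 = np.array([[0.1], [0.2]])
b2 = np.array([1.0])

h = g(x @ W1 + b1)   # -> [0.6791787, 0.5744425], the h1, h2 above
o = g(h @ W2 + b2)   # -> [0.7654329], the o1 above
print(h, o)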