This article collects typical usage examples of the Python function sklearn.learning_curve.learning_curve. If you have been wondering how to use learning_curve.learning_curve, or are looking for concrete examples of it in real code, the curated samples below may help. You can also explore the sklearn.learning_curve module, where this function lives, for further usage examples.
Below are 15 code examples of learning_curve.learning_curve, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
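Before the examples, here is a minimal usage sketch of the function. The dataset, estimator, and parameter values are illustrative assumptions rather than taken from any example below; also note that the sklearn.learning_curve module was deprecated in scikit-learn 0.18 and removed in 0.20, where the same function is exposed as sklearn.model_selection.learning_curve.

# Minimal sketch (illustrative estimator, data, and cv; not from the examples below).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC
from sklearn.learning_curve import learning_curve  # sklearn.model_selection in newer releases

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
train_sizes, train_scores, test_scores = learning_curve(
    LinearSVC(), X, y, cv=5, train_sizes=np.linspace(0.1, 1.0, 5))

print(train_sizes)                # absolute training-set sizes actually used
print(train_scores.mean(axis=1))  # mean training score per size
print(test_scores.mean(axis=1))   # mean cross-validation score per size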
Example 1: test_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    with warnings.catch_warnings(record=True) as w:
        train_sizes, train_scores, test_scores = learning_curve(
            estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    if len(w) > 0:
        raise RuntimeError("Unexpected warning: %r" % w[0].message)
    assert_equal(train_scores.shape, (10, 3))
    assert_equal(test_scores.shape, (10, 3))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
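MockImprovingEstimator (and the MockIncrementalImprovingEstimator used later) is a helper defined in scikit-learn's own test suite, not part of the public API. The sketch below is an assumption written for illustration and may differ from the real helper in details, but it shows the behaviour these tests rely on: the training score worsens and the validation score improves linearly with the number of samples seen, which is why the asserted means run from about 1.9 down to 1.0 and from 0.1 up to 1.0.

# Illustrative sketch of a mock "improving" estimator (an assumption, not the
# actual scikit-learn test helper).
from sklearn.base import BaseEstimator

class SketchImprovingEstimator(BaseEstimator):
    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        # remember the training subset and its size
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def score(self, X=None, y=None):
        ratio = float(self.train_sizes) / self.n_max_train_sizes
        # learning_curve scores the estimator on the training subset and on the
        # held-out fold; report a worsening "training" score and an improving
        # "test" score as the training set grows.
        if X is self.X_subset:
            return 2.0 - ratio
        return ratio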
Example 2: test_learning_curve_verbose
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_verbose():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        train_sizes, train_scores, test_scores = \
            learning_curve(estimator, X, y, cv=3, verbose=1)
    finally:
        out = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = old_stdout
    assert("[learning_curve]" in out)
Example 3: test_learning_curve_batch_and_incremental_learning_are_equal
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_batch_and_incremental_learning_are_equal():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    train_sizes = np.linspace(0.2, 1.0, 5)
    estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
    train_sizes_inc, train_scores_inc, test_scores_inc = \
        learning_curve(
            estimator, X, y, train_sizes=train_sizes,
            cv=3, exploit_incremental_learning=True)
    train_sizes_batch, train_scores_batch, test_scores_batch = \
        learning_curve(
            estimator, X, y, cv=3, train_sizes=train_sizes,
            exploit_incremental_learning=False)
    assert_array_equal(train_sizes_inc, train_sizes_batch)
    assert_array_almost_equal(train_scores_inc.mean(axis=1),
                              train_scores_batch.mean(axis=1))
    assert_array_almost_equal(test_scores_inc.mean(axis=1),
                              test_scores_batch.mean(axis=1))
Example 4: plot_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def plot_learning_curve(loss_train_record, loss_valid_record):
    plt.figure()
    plt.plot(loss_train_record, label='train')
    plt.plot(loss_valid_record, c='r', label='validation')
    plt.ylabel("RMSE")
    plt.legend(loc='upper left', frameon=False)
    plt.savefig("data/learning_curve.png")
Example 5: plot_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def plot_learning_curve(self):
    print " + Plotting learning curve (this will take some time)...",
    (X_train, y_train) = self._train_data
    plt.figure()
    plt.title("Learning curve (%s)" % self._learner)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        self._clf[self._learner],
        X_train, y_train,
        cv=5)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(
        train_sizes,
        train_scores_mean - train_scores_std,
        train_scores_mean + train_scores_std,
        alpha=0.1,
        color="r")
    plt.fill_between(
        train_sizes,
        test_scores_mean - test_scores_std,
        test_scores_mean + test_scores_std,
        alpha=0.1,
        color="g")
    plt.plot(
        train_sizes, train_scores_mean,
        'o-', color="r",
        label="Training score")
    plt.plot(
        train_sizes, test_scores_mean,
        'o-', color="g",
        label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
    print "done."

# Plot the ROC curve that results from each of our classifiers
Example 6: plot_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        train_sizes=np.linspace(.1, 1.0, 5)):
    if os.name == 'nt':
        n_jobs = 1
    else:
        n_jobs = -1
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs,
        scoring='accuracy', train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
Example 7: plot_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    from sklearn.learning_curve import learning_curve
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
Example 8: GenerateLearningCurve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def GenerateLearningCurve(estimator, X, y):
    # generate learning curve data
    # todo: edit parameters
    num_resources = -1  # n_jobs=-1 uses all available CPU cores
    train_sizes, train_scores, test_scores = learning_curve(
        estimator=estimator, X=X, y=y,
        train_sizes=np.linspace(0.1, 1.0, 10), cv=10,
        n_jobs=num_resources, scoring='mean_squared_error')
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)
    PlotLearningCurve(train_sizes, train_mean, train_std, test_mean, test_std)
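The PlotLearningCurve helper called above is not shown in this example; the version below is hypothetical, written purely for illustration of how such a plotting function might be structured, and is not taken from the original project.

# Hypothetical plotting helper (an assumption, not the original project's code):
# draws the mean training and validation curves with shaded one-standard-deviation bands.
import numpy as np
import matplotlib.pyplot as plt

def PlotLearningCurve(train_sizes, train_mean, train_std, test_mean, test_std):
    plt.figure()
    plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std,
                     alpha=0.1, color="r")
    plt.fill_between(train_sizes, test_mean - test_std, test_mean + test_std,
                     alpha=0.1, color="g")
    plt.plot(train_sizes, train_mean, 'o-', color="r", label="Training score")
    plt.plot(train_sizes, test_mean, 'o-', color="g", label="Cross-validation score")
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    plt.legend(loc="best")
    plt.show()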
Example 9: test_learning_curve
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve():
    digits = load_digits()
    X, y = digits.data, digits.target
    train_sizes = np.linspace(0.1, 1.0, endpoint=True, dtype='float')
    abs_trains_sizes, train_scores, test_scores = learning_curve(LinearSVC(),
        X, y, cv=10, scoring="accuracy", train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(abs_trains_sizes, train_scores_mean, label="Training Accuracy", color="r")
    ax.fill_between(abs_trains_sizes, train_scores_mean - train_scores_std,
                    train_scores_mean + train_scores_std, alpha=0.2, color="r")
    ax.plot(abs_trains_sizes, test_scores_mean, label="Testing Accuracy", color="g")
    ax.fill_between(abs_trains_sizes, test_scores_mean - test_scores_std,
                    test_scores_mean + test_scores_std, alpha=0.2, color="g")
    ax.set_title("Learning Curve with LinearSVC")
    ax.set_xlabel("Sample Nums")
    ax.set_ylabel("Score")
    ax.set_ylim(0, 1.1)
    ax.legend(loc='best')
    plt.show()
Example 10: test_learning_curve_unsupervised
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_unsupervised():
    X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
Example 11: test_learning_curve_incremental_learning_not_possible
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_incremental_learning_not_possible():
    X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    # The mockup does not have partial_fit()
    estimator = MockImprovingEstimator(1)
    assert_raises(ValueError, learning_curve, estimator, X, y,
                  exploit_incremental_learning=True)
Example 12: test_learning_curve_incremental_learning
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_incremental_learning():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockIncrementalImprovingEstimator(20)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=3, exploit_incremental_learning=True,
        train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))
Example 13: test_learning_curve_n_sample_range_out_of_bounds
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_n_sample_range_out_of_bounds():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.0, 1.0])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0.1, 1.1])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[0, 20])
    assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
                  train_sizes=[1, 21])
Example 14: test_learning_curve_remove_duplicate_sample_sizes
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_remove_duplicate_sample_sizes():
    X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(2)
    train_sizes, _, _ = assert_warns(
        RuntimeWarning, learning_curve, estimator, X, y, cv=3,
        train_sizes=np.linspace(0.33, 1.0, 3))
    assert_array_equal(train_sizes, [1, 2])
Example 15: test_learning_curve_with_boolean_indices
# Required module: from sklearn import learning_curve [as alias]
# Or: from sklearn.learning_curve import learning_curve [as alias]
def test_learning_curve_with_boolean_indices():
    X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
                               n_redundant=0, n_classes=2,
                               n_clusters_per_class=1, random_state=0)
    estimator = MockImprovingEstimator(20)
    cv = KFold(n=30, n_folds=3)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
    assert_array_equal(train_sizes, np.linspace(2, 20, 10))
    assert_array_almost_equal(train_scores.mean(axis=1),
                              np.linspace(1.9, 1.0, 10))
    assert_array_almost_equal(test_scores.mean(axis=1),
                              np.linspace(0.1, 1.0, 10))