This article collects typical usage examples of the Python class sklearn.metrics.SCORERS. If you are wondering what exactly SCORERS is for, how to use it, or want to see it in real code, the curated class examples below may help.
Five code examples of the SCORERS class are shown below, ordered by popularity by default.
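For orientation: SCORERS is a module-level dict that maps scoring-string names to scorer callables of the form scorer(estimator, X, y). Here is a minimal sketch of looking one up and calling it, assuming an older scikit-learn release where SCORERS is still importable from sklearn.metrics:

from sklearn.datasets import make_classification
from sklearn.metrics import SCORERS
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(random_state=0)
clf = DecisionTreeClassifier(random_state=0).fit(X, y)

print(sorted(SCORERS))          # available scoring names, e.g. 'accuracy', 'roc_auc'
accuracy = SCORERS['accuracy']  # look a scorer callable up by name
print(accuracy(clf, X, y))      # scorers are called as scorer(estimator, X, y)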
Example 1: test_scorer_sample_weight
# Imports needed by this snippet (exact import locations vary slightly across
# scikit-learn versions).  REGRESSION_SCORERS, CLF_SCORERS and
# MULTILABEL_ONLY_SCORERS are name lists defined elsewhere in scikit-learn's
# test module; see the illustration after this example.
import numpy as np
from sklearn.datasets import make_classification, make_multilabel_classification
from sklearn.dummy import DummyRegressor
from sklearn.metrics import SCORERS
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.testing import (assert_almost_equal, assert_not_equal,
                                   assert_true)


def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy="median")
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    estimator = dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS]
        + [(name, sensible_clf) for name in CLF_SCORERS]
        + [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(
                weighted, unweighted,
                msg="scorer {0} behaves identically when called with "
                    "sample weights: {1} vs {2}".format(name, weighted,
                                                        unweighted))
            assert_almost_equal(
                weighted, ignored,
                err_msg="scorer {0} behaves differently when ignoring "
                        "samples and setting sample_weight to 0: "
                        "{1} vs {2}".format(name, weighted, ignored))
        except TypeError as e:
            assert_true(
                "sample_weight" in str(e),
                "scorer {0} raises unhelpful exception when called "
                "with sample weights: {1}".format(name, str(e)))
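The test above relies on three name lists (REGRESSION_SCORERS, CLF_SCORERS, MULTILABEL_ONLY_SCORERS) that live elsewhere in scikit-learn's test module and are not reproduced on this page. Purely to illustrate their shape, these could look roughly like the following; the actual lists are longer and the scorer names differ between releases:

REGRESSION_SCORERS = ['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error']
CLF_SCORERS = ['accuracy', 'f1_weighted', 'roc_auc', 'neg_log_loss']
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']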
Example 2: test_scorer_sample_weight
def test_scorer_sample_weight():
    """Test that scorers support sample_weight or raise sensible errors"""
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier()
    sensible_clf.fit(X_train, y_train)
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS])

    for name, scorer in SCORERS.items():
        try:
            weighted = scorer(estimator[name], X_test, y_test,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], y_test[10:])
            unweighted = scorer(estimator[name], X_test, y_test)
            assert_not_equal(weighted, unweighted,
                             "scorer {0} behaves identically when called with "
                             "sample weights: {1} vs {2}".format(name, weighted,
                                                                 unweighted))
            assert_equal(weighted, ignored,
                         "scorer {0} behaves differently when ignoring "
                         "samples and setting sample_weight to 0: "
                         "{1} vs {2}".format(name, weighted, ignored))
        except TypeError as e:
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
Example 3: make_scorer
# Imports needed by this snippet; the custom metric functions (kappa,
# kendall_tau, spearman, pearson, f1_score_least_frequent) come from SKLL's
# own metrics module.
from sklearn.metrics import f1_score, make_scorer, SCORERS

from .metrics import (kappa, kendall_tau, spearman, pearson,
                      f1_score_least_frequent)
from .version import __version__, VERSION

__all__ = ['Learner', 'load_examples', 'kappa', 'kendall_tau', 'spearman',
           'pearson', 'f1_score_least_frequent', 'run_configuration',
           'run_ablation', 'write_feature_file', 'convert_examples']

# Add our scorers to the sklearn dictionary here so that they will always be
# available if you import anything from skll
_scorers = {'f1_score_micro': make_scorer(f1_score, average='micro',
                                          pos_label=None),
            'f1_score_macro': make_scorer(f1_score, average='macro',
                                          pos_label=None),
            'f1_score_weighted': make_scorer(f1_score, average='weighted',
                                             pos_label=None),
            'f1_score_least_frequent': make_scorer(f1_score_least_frequent),
            'pearson': make_scorer(pearson),
            'spearman': make_scorer(spearman),
            'kendall_tau': make_scorer(kendall_tau),
            'unweighted_kappa': make_scorer(kappa),
            'quadratic_weighted_kappa': make_scorer(kappa,
                                                    weights='quadratic'),
            'linear_weighted_kappa': make_scorer(kappa, weights='linear'),
            'qwk_off_by_one': make_scorer(kappa, weights='quadratic',
                                          allow_off_by_one=True),
            'lwk_off_by_one': make_scorer(kappa, weights='linear',
                                          allow_off_by_one=True),
            'uwk_off_by_one': make_scorer(kappa, allow_off_by_one=True)}

SCORERS.update(_scorers)
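Once SCORERS.update(_scorers) has run, the custom metric names can be passed anywhere scikit-learn accepts a scoring string, because get_scorer looks names up in this dict. A hedged sketch of that usage (the estimator and data here are placeholders, not part of the SKLL code, and the cross-validation import path differs in older scikit-learn releases):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score  # sklearn.cross_validation in old versions

import skll  # importing skll registers the custom scorers shown above

X, y = make_classification(random_state=0)
scores = cross_val_score(LogisticRegression(), X, y, scoring='f1_score_micro')
print(scores.mean())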
Example 4: test_scorer_memmap_input
def test_scorer_memmap_input():
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    for name in SCORERS.keys():
        yield check_scorer_memmap, name
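check_scorer_memmap is defined elsewhere in scikit-learn's test module and is not shown here. As a rough, hypothetical sketch of what such a check does for a classification scorer (the real helper picks a suitable estimator for each scorer name; the function name below is illustrative):

import numbers
import os
import tempfile

import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import SCORERS
from sklearn.tree import DecisionTreeClassifier

def check_scorer_memmap_sketch(name):
    # Hypothetical stand-in for check_scorer_memmap; handles classification
    # scorers only.
    X, y = make_classification(random_state=0)
    tmpdir = tempfile.mkdtemp()
    np.save(os.path.join(tmpdir, 'X.npy'), X)
    np.save(os.path.join(tmpdir, 'y.npy'), y)
    X_mm = np.load(os.path.join(tmpdir, 'X.npy'), mmap_mode='r')  # memmapped views
    y_mm = np.load(os.path.join(tmpdir, 'y.npy'), mmap_mode='r')

    clf = DecisionTreeClassifier(random_state=0).fit(X_mm, y_mm)
    score = SCORERS[name](clf, X_mm, y_mm)
    # The regression in #6147 was a 0-d memmap coming back instead of a float.
    assert isinstance(score, numbers.Number)

check_scorer_memmap_sketch('accuracy')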
Example 5: test_all_scorers_repr
def test_all_scorers_repr():
    # Test that all scorers have a working repr
    for name, scorer in SCORERS.items():
        repr(scorer)
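For reference, the reprs being exercised look roughly like the following; the exact strings differ between scikit-learn versions:

from sklearn.metrics import SCORERS

print(repr(SCORERS['accuracy']))  # e.g. make_scorer(accuracy_score)
print(repr(SCORERS['roc_auc']))   # e.g. make_scorer(roc_auc_score, needs_threshold=True)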