This article collects typical usage examples of the Python method sklearn.linear_model.RidgeClassifier.lower. If you are wondering what RidgeClassifier.lower does in Python, how to use it, or where to find examples, the hand-picked method code examples below may help. You can also read more about the class the method belongs to, sklearn.linear_model.RidgeClassifier.
The following shows the 1 code example found for the RidgeClassifier.lower method, sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: Eval
# Required import: from sklearn.linear_model import RidgeClassifier [as alias]
# Or: from sklearn.linear_model.RidgeClassifier import lower [as alias]
import numpy as np
import sklearn.metrics
from sklearn.linear_model import (RidgeClassifier, Perceptron,
                                  PassiveAggressiveClassifier, SGDClassifier)
from sklearn.svm import LinearSVC, SVC
def Eval(XTrain, YTrain, XTest, YTest, clf, return_predicted_labels=False):
"""
Inputs:
XTrain - N by D matrix of training data vectors
YTrain - N by 1 matrix of training class labels
XTest - M by D matrix of testin data vectors
YTrain - M by 1 matrix of testing class labels
clstr - the clustering function
either the string = "KMeans" or "GMM"
or a sklearn clustering instance
with the methods .fit and
Outputs:
A tuple containing (in the following order):
Accuracy
Overall Precision
Overall Recall
Overall F1 score
Avg. Precision per class
Avg. Recall per class
F1 Score
Precision per class
Recall per class
F1 Score per class
(if return_predicted_labels)
predicted class labels for each row in XTest
"""
    # If clf is given as a string, map it to the corresponding scikit-learn classifier.
    if type(clf) == str:
        if 'ridge' in clf.lower():
            clf = RidgeClassifier(tol=1e-2, solver="lsqr")
        elif "perceptron" in clf.lower():
            clf = Perceptron(n_iter=50)  # n_iter was renamed max_iter in newer scikit-learn
        elif "passive aggressive" in clf.lower() or 'passive-aggressive' in clf.lower():
            clf = PassiveAggressiveClassifier(n_iter=50)
        elif 'linsvm' in clf.lower() or 'linearsvm' in clf.lower() or 'linearsvc' in clf.lower():
            clf = LinearSVC()
        elif 'svm' in clf.lower() or 'svc' in clf.lower():
            clf = SVC()
        elif 'sgd' in clf.lower():
            clf = SGDClassifier()
    # Fit on the training data and predict labels for the test data.
    clf.fit(XTrain, YTrain)
    YPred = clf.predict(XTest)
    # Overall (micro-averaged) metrics and per-class metrics.
    accuracy = sklearn.metrics.accuracy_score(YTest, YPred)
    (overall_precision, overall_recall, overall_f1, support) = \
        sklearn.metrics.precision_recall_fscore_support(YTest, YPred, average='micro')
    (precision_per_class, recall_per_class, f1_per_class, support_per_class) = \
        sklearn.metrics.precision_recall_fscore_support(YTest, YPred)
    avg_precision_per_class = np.mean(precision_per_class)
    avg_recall_per_class = np.mean(recall_per_class)
    avg_f1_per_class = np.mean(f1_per_class)
    del clf
    if return_predicted_labels:
        return (accuracy, overall_precision, overall_recall, overall_f1,
                avg_precision_per_class, avg_recall_per_class, avg_f1_per_class,
                precision_per_class, recall_per_class, f1_per_class, YPred)
    else:
        return (accuracy, overall_precision, overall_recall, overall_f1,
                avg_precision_per_class, avg_recall_per_class, avg_f1_per_class,
                precision_per_class, recall_per_class, f1_per_class)
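For context, here is a minimal usage sketch of Eval on synthetic data. The dataset, the train/test split, and the "ridge" string are illustrative assumptions and not part of the original example.

# Hypothetical usage sketch: Eval resolves the "ridge" string to a RidgeClassifier.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, n_features=20, n_informative=5,
                           n_classes=3, random_state=0)
XTrain, XTest, YTrain, YTest = train_test_split(X, y, test_size=0.25, random_state=0)

results = Eval(XTrain, YTrain, XTest, YTest, "ridge")
print("accuracy:", results[0])
print("overall F1:", results[3])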