本文整理汇总了Python中sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.score方法的典型用法代码示例。如果您正苦于以下问题:Python QuadraticDiscriminantAnalysis.score方法的具体用法?Python QuadraticDiscriminantAnalysis.score怎么用?Python QuadraticDiscriminantAnalysis.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis
的用法示例。
在下文中一共展示了QuadraticDiscriminantAnalysis.score方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_QDA
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
def get_QDA(Xtrain, Ytrain, Xtest=None, Ytest=None, verbose=0):
    """Fit a QDA classifier on the training data and optionally report accuracy.

    Parameters
    ----------
    Xtrain, Ytrain : array-like
        Training features and labels.
    Xtest, Ytest : array-like or None, optional
        Held-out features/labels; scored only when Xtest is provided.
    verbose : int, optional
        When 1, print train (and, if available, test) accuracy percentages.

    Returns
    -------
    The fitted QDA classifier.
    """
    qda = QDA()
    qda.fit(Xtrain, Ytrain)
    scores = np.empty((2))
    if verbose == 1:
        scores[0] = qda.score(Xtrain, Ytrain)
        print('QDA, train: {0:.02f}% '.format(scores[0] * 100))
        # NOTE(review): the source page lost all indentation; the test-set
        # report is assumed to belong inside the verbose branch, like the
        # train-set report — confirm against the upstream repository.
        if Xtest is not None:  # idiomatic form of type(Xtest) != type(None)
            scores[1] = qda.score(Xtest, Ytest)
            print('QDA, test: {0:.02f}% '.format(scores[1] * 100))
    return qda
示例2: QuadraticDiscriminantAnalysiscls
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
class QuadraticDiscriminantAnalysiscls(object):
    """Best-effort wrapper around sklearn's QuadraticDiscriminantAnalysis.

    Each operation catches and prints its own traceback instead of raising,
    returning None on failure (the original deliberate best-effort behavior).
    """

    def __init__(self):
        self.qda_cls = QuadraticDiscriminantAnalysis()  # underlying estimator
        self.prediction = None   # last predict() output
        self.train_x = None      # last training features seen
        self.train_y = None      # last training labels seen

    def train_model(self, train_x, train_y):
        """Fit the underlying QDA model, remembering the training data."""
        try:
            self.train_x = train_x
            self.train_y = train_y
            self.qda_cls.fit(train_x, train_y)
        except Exception:  # narrowed from bare except; still best-effort
            print(traceback.format_exc())

    def predict(self, test_x):
        """Predict labels for test_x, caching both input and output."""
        try:
            self.test_x = test_x
            self.prediction = self.qda_cls.predict(test_x)
            return self.prediction
        except Exception:
            print(traceback.format_exc())

    def accuracy_score(self, test_y):
        """Return mean accuracy on the features passed to the last predict()."""
        try:
            return self.qda_cls.score(self.test_x, test_y)
        except Exception:
            print(traceback.format_exc())
示例3: range
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection (same signature).
from sklearn.model_selection import train_test_split

# Average QDA accuracy over `stop` random 80/20 splits, all classes at once.
total_score = 0
stop = 1000
for x in range(stop):
    clf = QuadraticDiscriminantAnalysis()
    data = win.getStudents()  # NOTE(review): `win` is defined elsewhere in the original script
    data_train, data_test = train_test_split(data, test_size=0.2)
    data_train_labels = [s.spec for s in data_train]
    data_test_labels = [s.spec for s in data_test]
    data_train = [s.grades for s in data_train]
    data_test = [s.grades for s in data_test]
    clf.fit(data_train, data_train_labels)
    total_score += clf.score(data_test, data_test_labels)
total_score = total_score / stop
print("all")
print(total_score)

# One-vs-rest accuracy per specialisation.
specs = ["FK", "FM", "MN", "OE"]
for sp in specs:
    total_score = 0
    for x in range(stop):
        clf = QuadraticDiscriminantAnalysis()
        data = win.getStudents()
        data_train, data_test = train_test_split(data, test_size=0.2)
        data_train_labels = [s.spec if s.spec == sp else "NOT " + sp for s in data_train]
        data_test_labels = [s.spec if s.spec == sp else "NOT " + sp for s in data_test]
        data_train = [s.grades for s in data_train]
        data_test = [s.grades for s in data_test]
        # NOTE(review): the example is truncated here in the source page —
        # the fit/score/print steps of the one-vs-rest loop were omitted.
示例4: discriminatePlot
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
# NOTE(review): legacy Python 2 code (print statements, sklearn.cross_validation).
# The source page lost all indentation and truncated the function body, so the
# code below is preserved byte-for-byte; only comments were fixed or added.
def discriminatePlot(X, y, cVal, titleStr=''):
# Frederic's Robust Wrapper for discriminant analysis function. Performs LDA, QDA and RF after error checking,
# Generates nice plots and returns cross-validated
# performance, stderr and base line.
# X np array n rows x p parameters
# y group labels n rows
# cVal rgb color code for each data point - should be the same for each data point belonging to the same group
# titleStr title for plots
# returns: ldaScore, ldaScoreSE, qdaScore, qdaScoreSE, rfScore, rfScoreSE, nClasses
# Global Parameters
CVFOLDS = 10
# A class must have at least MINCOUNT examples to take part in the analysis.
MINCOUNT = 10
# A class must have at least MINCOUNTTRAINING examples inside a CV training fold.
MINCOUNTTRAINING = 5
# Initialize Variables and clean up data: drop classes with fewer than MINCOUNT samples.
classes, classesCount = np.unique(y, return_counts = True) # Classes to be discriminated should be same as ldaMod.classes_
goodIndClasses = np.array([n >= MINCOUNT for n in classesCount])
goodInd = np.array([b in classes[goodIndClasses] for b in y])
yGood = y[goodInd]
XGood = X[goodInd]
cValGood = cVal[goodInd]
classes, classesCount = np.unique(yGood, return_counts = True)
nClasses = classes.size # Number of classes or groups
# Do we have enough data?
if (nClasses < 2):
print 'Error in ldaPLot: Insufficient classes with minimun data (%d) for discrimination analysis' % (MINCOUNT)
return -1, -1, -1, -1 , -1, -1, -1
# Cap the fold count by the size of the smallest surviving class.
cvFolds = min(min(classesCount), CVFOLDS)
if (cvFolds < CVFOLDS):
print 'Warning in ldaPlot: Cross-validation performed with %d folds (instead of %d)' % (cvFolds, CVFOLDS)
# Data size and color values
nD = XGood.shape[1] # number of features in X
nX = XGood.shape[0] # number of data points in X
cClasses = [] # Color code for each class (first member's cVal + alpha 1.0)
for cl in classes:
icl = (yGood == cl).nonzero()[0][0]
cClasses.append(np.append(cValGood[icl],1.0))
cClasses = np.asarray(cClasses)
myPrior = np.ones(nClasses)*(1.0/nClasses)  # uniform class priors
# Perform a PCA for dimensionality reduction so that the covariance matrix can be fitted.
# Heuristic: keep at most sqrt(n/5) dimensions so QDA has ~5 samples per parameter.
nDmax = int(np.fix(np.sqrt(nX/5)))
if nDmax < nD:
print 'Warning: Insufficient data for', nD, 'parameters. PCA projection to', nDmax, 'dimensions.'
nDmax = min(nD, nDmax)
pca = PCA(n_components=nDmax)
Xr = pca.fit_transform(XGood)
print 'Variance explained is %.2f%%' % (sum(pca.explained_variance_ratio_)*100.0)
# Initialise Classifiers
ldaMod = LDA(n_components = min(nDmax,nClasses-1), priors = myPrior, shrinkage = None, solver = 'svd')
qdaMod = QDA(priors = myPrior)
rfMod = RF() # by default assumes equal weights
# Perform CVFOLDS fold cross-validation to get performance of classifiers.
ldaScores = np.zeros(cvFolds)
qdaScores = np.zeros(cvFolds)
rfScores = np.zeros(cvFolds)
skf = cross_validation.StratifiedKFold(yGood, cvFolds)
iskf = 0
for train, test in skf:
# Enforce the MINCOUNT in each class for Training
trainClasses, trainCount = np.unique(yGood[train], return_counts=True)
goodIndClasses = np.array([n >= MINCOUNTTRAINING for n in trainCount])
goodIndTrain = np.array([b in trainClasses[goodIndClasses] for b in yGood[train]])
# Specify the training data set, the number of groups and priors
yTrain = yGood[train[goodIndTrain]]
XrTrain = Xr[train[goodIndTrain]]
trainClasses, trainCount = np.unique(yTrain, return_counts=True)
ntrainClasses = trainClasses.size
# Skip this cross-validation fold because of insufficient data
if ntrainClasses < 2:
continue
# Keep only test points whose class survived the training-side filtering.
goodInd = np.array([b in trainClasses for b in yGood[test]])
if (goodInd.size == 0):
continue
# Fit the data with per-fold uniform priors over the surviving classes.
trainPriors = np.ones(ntrainClasses)*(1.0/ntrainClasses)
ldaMod.priors = trainPriors
qdaMod.priors = trainPriors
ldaMod.fit(XrTrain, yTrain)
qdaMod.fit(XrTrain, yTrain)
rfMod.fit(XrTrain, yTrain)
ldaScores[iskf] = ldaMod.score(Xr[test[goodInd]], yGood[test[goodInd]])
qdaScores[iskf] = qdaMod.score(Xr[test[goodInd]], yGood[test[goodInd]])
#......... remaining code omitted by the source page .........
示例5: QuadDA
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
def QuadDA(X_train, y_train, X_test, y_test):
    """Train a quadratic discriminant classifier and return its test accuracy.

    Fits a QDA model on (X_train, y_train) and returns the mean accuracy
    computed on (X_test, y_test).
    """
    model = QDA()
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
示例6: removeDuplicateRows
# 需要导入模块: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis [as 别名]
# 或者: from sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis import score [as 别名]
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
def removeDuplicateRows(a):
    """Return the unique rows of 2-D array *a*, sorted lexicographically.

    Replaces the classic structured-view trick (view as one void/record per
    row, np.unique, view back) with ``np.unique(..., axis=0)`` (NumPy >= 1.13),
    which yields the same lexicographically sorted unique rows, is far clearer,
    and needs no manual ``ascontiguousarray`` step.
    """
    return np.unique(np.asarray(a), axis=0)
# Build a labelled dataset from one whitespace-delimited text file per colour
# (class index = position in `classes`), train QDA on a 60/40 split, report
# held-out accuracy and persist the fitted model.
classes = ['red','yellow','green','orange']
for index, classs in enumerate(classes):
    print (index, classs)
    if index == 0:
        data = removeDuplicateRows(np.loadtxt(classs))
        target = np.zeros(len(data))
    else:
        clsdata = removeDuplicateRows(np.loadtxt(classs))
        data = np.append(data, clsdata, axis=0)
        target = np.append(target, np.zeros(len(clsdata)) + index)
print (len(data), len(target))
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.4, random_state=0)
clf = QuadraticDiscriminantAnalysis().fit(X_train, y_train)
print (clf.score(X_test, y_test))
# NOTE(review): sklearn.externals.joblib (imported above) was removed in
# scikit-learn 0.23 — on modern versions install joblib and `import joblib`.
joblib.dump(clf, 'rgbClassifier.pkl')