This article collects typical usage examples of the LinearDiscriminantAnalysis.priors method from Python's sklearn.discriminant_analysis module. If you are wondering what LinearDiscriminantAnalysis.priors does and how to use it, the curated code example below may help. You can also explore the other methods of the containing class, sklearn.discriminant_analysis.LinearDiscriminantAnalysis.
One code example of the LinearDiscriminantAnalysis.priors method is shown below.
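Before the full example, a minimal self-contained sketch of the pattern this page covers may help: priors can be passed to LinearDiscriminantAnalysis at construction time, or reassigned through the priors attribute before refitting. The data and prior values below are synthetic and purely illustrative.

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

# Two synthetic, partly overlapping classes (illustrative only)
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(40, 3), rng.randn(40, 3) + 2.0])
y = np.array([0] * 40 + [1] * 40)

# Pass uniform priors at construction time...
lda = LinearDiscriminantAnalysis(priors=np.array([0.5, 0.5]))
lda.fit(X, y)

# ...or reassign the priors attribute and refit, as Example 1 below does
lda.priors = np.array([0.25, 0.75])
lda.fit(X, y)
print(lda.score(X, y))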
Example 1: discriminatePlot
# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import priors [as alias]
# Full set of imports this example relies on:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.model_selection import StratifiedKFold
def discriminatePlot(X, y, cVal, titleStr=''):
    # Frederic's robust wrapper for discriminant analysis. Performs LDA, QDA and RF after error checking,
    # generates nice plots and returns cross-validated
    # performance, standard error and baseline.
    # X: np array, n rows x p parameters
    # y: group labels, n rows
    # cVal: rgb color code for each data point - should be the same for data belonging to the same group
    # titleStr: title for plots
    # returns: ldaScore, ldaScoreSE, qdaScore, qdaScoreSE, rfScore, rfScoreSE, nClasses

    # Global parameters
    CVFOLDS = 10
    MINCOUNT = 10
    MINCOUNTTRAINING = 5

    # Initialize variables and clean up data
    classes, classesCount = np.unique(y, return_counts=True)  # Classes to be discriminated should be same as ldaMod.classes_
    goodIndClasses = np.array([n >= MINCOUNT for n in classesCount])
    goodInd = np.array([b in classes[goodIndClasses] for b in y])
    yGood = y[goodInd]
    XGood = X[goodInd]
    cValGood = cVal[goodInd]
    classes, classesCount = np.unique(yGood, return_counts=True)
    nClasses = classes.size  # Number of classes or groups

    # Do we have enough data?
    if nClasses < 2:
        print('Error in discriminatePlot: Insufficient classes with minimum data (%d) for discriminant analysis' % MINCOUNT)
        return -1, -1, -1, -1, -1, -1, -1
    cvFolds = min(min(classesCount), CVFOLDS)
    if cvFolds < CVFOLDS:
        print('Warning in discriminatePlot: Cross-validation performed with %d folds (instead of %d)' % (cvFolds, CVFOLDS))

    # Data size and color values
    nD = XGood.shape[1]   # number of features in X
    nX = XGood.shape[0]   # number of data points in X
    cClasses = []         # Color code for each class
    for cl in classes:
        icl = (yGood == cl).nonzero()[0][0]
        cClasses.append(np.append(cValGood[icl], 1.0))
    cClasses = np.asarray(cClasses)
    myPrior = np.ones(nClasses) * (1.0 / nClasses)

    # Perform a PCA for dimensionality reduction so that the covariance matrix can be fitted.
    nDmax = int(np.fix(np.sqrt(nX / 5)))
    if nDmax < nD:
        print('Warning: Insufficient data for %d parameters. PCA projection to %d dimensions.' % (nD, nDmax))
    nDmax = min(nD, nDmax)
    pca = PCA(n_components=nDmax)
    Xr = pca.fit_transform(XGood)
    print('Variance explained is %.2f%%' % (sum(pca.explained_variance_ratio_) * 100.0))

    # Initialize classifiers
    ldaMod = LDA(n_components=min(nDmax, nClasses - 1), priors=myPrior, shrinkage=None, solver='svd')
    qdaMod = QDA(priors=myPrior)
    rfMod = RF()  # by default assumes equal weights

    # Perform cvFolds-fold cross-validation to get the performance of the classifiers.
    ldaScores = np.zeros(cvFolds)
    qdaScores = np.zeros(cvFolds)
    rfScores = np.zeros(cvFolds)
    # StratifiedKFold from sklearn.model_selection replaces the removed
    # sklearn.cross_validation API used in the original example.
    skf = StratifiedKFold(n_splits=cvFolds)
    iskf = 0
    for train, test in skf.split(Xr, yGood):
        # Enforce MINCOUNTTRAINING in each class for training
        trainClasses, trainCount = np.unique(yGood[train], return_counts=True)
        goodIndClasses = np.array([n >= MINCOUNTTRAINING for n in trainCount])
        goodIndTrain = np.array([b in trainClasses[goodIndClasses] for b in yGood[train]])
        # Specify the training data set, the number of groups and priors
        yTrain = yGood[train[goodIndTrain]]
        XrTrain = Xr[train[goodIndTrain]]
        trainClasses, trainCount = np.unique(yTrain, return_counts=True)
        ntrainClasses = trainClasses.size
        # Skip this cross-validation fold because of insufficient data
        if ntrainClasses < 2:
            continue
        goodInd = np.array([b in trainClasses for b in yGood[test]])
        if not np.any(goodInd):  # no test sample belongs to a training class
            continue
        # Fit the data, with uniform priors over the classes kept for this fold
        trainPriors = np.ones(ntrainClasses) * (1.0 / ntrainClasses)
        ldaMod.priors = trainPriors
        qdaMod.priors = trainPriors
        ldaMod.fit(XrTrain, yTrain)
        qdaMod.fit(XrTrain, yTrain)
        rfMod.fit(XrTrain, yTrain)
        ldaScores[iskf] = ldaMod.score(Xr[test[goodInd]], yGood[test[goodInd]])
        qdaScores[iskf] = qdaMod.score(Xr[test[goodInd]], yGood[test[goodInd]])
# ......... (remainder of the example omitted) .........
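A hedged usage sketch of discriminatePlot, assuming the omitted tail of the example returns the seven values listed in its docstring. The data, group labels, and colors below are synthetic and chosen only to satisfy the documented shapes (X is n x p, y holds n labels, cVal holds one RGB triple per data point).

import numpy as np

# Three synthetic groups of 30 points in 4 dimensions (illustrative only)
rng = np.random.RandomState(42)
X = np.vstack([rng.randn(30, 4) + shift for shift in (0.0, 2.0, 4.0)])
y = np.repeat(np.array(['a', 'b', 'c']), 30)
colors = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
cVal = colors[np.repeat(np.arange(3), 30)]  # same color for every point in a group

ldaScore, ldaSE, qdaScore, qdaSE, rfScore, rfSE, nClasses = discriminatePlot(
    X, y, cVal, titleStr='synthetic demo')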