This page collects typical usage examples of the Python method sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.predict_proba. If you have been wondering exactly what QuadraticDiscriminantAnalysis.predict_proba does and how to use it, the curated examples below should help. You may also want to read about the class the method belongs to, sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.
Five code examples of QuadraticDiscriminantAnalysis.predict_proba are shown below, ordered by popularity by default.
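Before the collected examples, here is a minimal, self-contained sketch of the method on synthetic data (the dataset, class count, and blob parameters are illustrative assumptions, not taken from the examples below):
# Minimal sketch: QuadraticDiscriminantAnalysis.predict_proba on made-up data.
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

rng = np.random.RandomState(0)
# Two Gaussian blobs with different covariances, the setting QDA models explicitly
X = np.vstack([rng.multivariate_normal([0, 0], [[1.0, 0.3], [0.3, 1.0]], 100),
               rng.multivariate_normal([3, 3], [[0.5, -0.2], [-0.2, 2.0]], 100)])
y = np.array([0] * 100 + [1] * 100)

clf = QuadraticDiscriminantAnalysis().fit(X, y)
proba = clf.predict_proba(X[:5])   # shape (5, 2): one column per class, rows sum to 1
print(proba)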
Example 1: QuadraticDiscriminantAnalysisPredictor
# Required import: from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# Method demonstrated: QuadraticDiscriminantAnalysis.predict_proba
class QuadraticDiscriminantAnalysisPredictor(PredictorBase):
    '''
    Quadratic Discriminant Analysis
    '''

    def __init__(self):
        self.clf = QuadraticDiscriminantAnalysis()

    def fit(self, X_train, y_train):
        self.clf.fit(X_train, y_train)

    def predict(self, X_test):
        # Return class probabilities rather than hard labels, wrapped via the base class
        predictions = self.clf.predict_proba(X_test)
        predictions_df = self.bundle_predictions(predictions)
        return predictions_df

    def get_k_best_k(self):
        return 4
Developer: paul-reiners, Project: kaggle-shelter-animal-outcomes, Lines: 21, Source: quadratic_descriminant_analysis_predictor.py
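Example 1 depends on PredictorBase and its bundle_predictions helper, which live elsewhere in the project and are not shown here. The sketch below is a hypothetical stand-in for that base class, included only to make the excerpt self-contained; the real implementation in kaggle-shelter-animal-outcomes may differ.
# Hypothetical stand-in for the project's PredictorBase (illustration only).
import pandas as pd

class PredictorBase:
    def bundle_predictions(self, predictions):
        # Assumed behaviour: wrap the probability matrix in a DataFrame,
        # one column per class label seen during fit.
        return pd.DataFrame(predictions, columns=self.clf.classes_)

# With a base class like this, the Example 1 wrapper could be used as:
#   predictor = QuadraticDiscriminantAnalysisPredictor()
#   predictor.fit(X_train, y_train)
#   predictions_df = predictor.predict(X_test)   # DataFrame of class probabilities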
Example 2: test_qda
def test_qda():
    # QDA classification.
    # This checks that QDA implements fit and predict and returns
    # correct values for a simple toy dataset.
    clf = QuadraticDiscriminantAnalysis()
    y_pred = clf.fit(X6, y6).predict(X6)
    assert_array_equal(y_pred, y6)

    # Assure that it works with 1D data
    y_pred1 = clf.fit(X7, y6).predict(X7)
    assert_array_equal(y_pred1, y6)

    # Test probability estimates
    y_proba_pred1 = clf.predict_proba(X7)
    assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
    y_log_proba_pred1 = clf.predict_log_proba(X7)
    assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)

    y_pred3 = clf.fit(X6, y7).predict(X6)
    # QDA shouldn't be able to separate those
    assert np.any(y_pred3 != y7)

    # Classes should have at least 2 elements
    assert_raises(ValueError, clf.fit, X6, y4)
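The assertion on y_proba_pred1 works because the labels in the toy fixture y6 take the values 1 and 2: thresholding the probability of the second class at 0.5 and adding 1 reproduces predict for this binary case. A self-contained illustration of the same equivalence (the data here is made up, not sklearn's actual test fixture):
# Thresholding predict_proba reproduces predict for a binary problem with labels 1 and 2.
# The 1-D data below is an assumption for illustration, not the X7/y6 fixture above.
import numpy as np
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

X = np.array([[-2.0], [-1.5], [-1.0], [1.0], [1.5], [2.0]])
y = np.array([1, 1, 1, 2, 2, 2])

clf = QuadraticDiscriminantAnalysis().fit(X, y)
proba = clf.predict_proba(X)
labels_from_proba = (proba[:, 1] > 0.5) + 1   # column 1 is the probability of class 2
assert np.array_equal(labels_from_proba, clf.predict(X))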
Example 3: discriminatePlot
#......... part of the code omitted here .........
print('LDA Weights:')
print('DFA1:', ldaMod.coef_[0, :])
if nClasses > 2:
    print('DFA2:', ldaMod.coef_[1, :])
if nClasses > 3:
    print('DFA3:', ldaMod.coef_[2, :])

# Obtain fits in this rotated space for display purposes
ldaMod.fit(Xrr, yGood)
qdaMod.fit(Xrr, yGood)
rfMod.fit(Xrr, yGood)
XrrMean = Xrr.mean(0)

# Make a mesh for plotting
x1, x2 = np.meshgrid(np.arange(-6.0, 6.0, 0.1), np.arange(-6.0, 6.0, 0.1))
xm1 = np.reshape(x1, -1)
xm2 = np.reshape(x2, -1)
nxm = np.size(xm1)
Xm = np.zeros((nxm, Xrr.shape[1]))
Xm[:, 0] = xm1
if Xrr.shape[1] > 1:
    Xm[:, 1] = xm2
for ix in range(2, Xrr.shape[1]):
    Xm[:, ix] = np.squeeze(np.ones((nxm, 1))) * XrrMean[ix]

XmcLDA = np.zeros((nxm, 4))  # RGBA color values for LDA
XmcQDA = np.zeros((nxm, 4))  # RGBA color values for QDA
XmcRF = np.zeros((nxm, 4))   # RGBA color values for RF

# Predict values on the mesh for plotting, based on the first two DFs
yPredLDA = ldaMod.predict_proba(Xm)
yPredQDA = qdaMod.predict_proba(Xm)
yPredRF = rfMod.predict_proba(Xm)

# Transform the predictions into color codes
maxLDA = yPredLDA.max()
for ix in range(nxm):
    cWeight = yPredLDA[ix, :]                             # Probabilities for all classes
    cWinner = (cWeight == cWeight.max()).astype('float')  # Winner takes all
    # XmcLDA[ix, :] = np.dot(cWeight, cClasses) / nClasses
    XmcLDA[ix, :] = np.dot(cWinner, cClasses)
    XmcLDA[ix, 3] = cWeight.max() / maxLDA

# Plot the surface of probability
plt.figure(facecolor='white', figsize=(10, 3))
plt.subplot(131)
Zplot = XmcLDA.reshape(np.shape(x1)[0], np.shape(x1)[1], 4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower',
           interpolation='none', aspect='auto')
if nClasses > 2:
    plt.scatter(Xrr[:, 0], Xrr[:, 1], c=cValGood, s=40, zorder=1)
else:
    plt.scatter(Xrr, (np.random.rand(Xrr.size) - 0.5) * 12.0, c=cValGood, s=40, zorder=1)
plt.title('%s: LDA pC %.0f %%' % (titleStr, (ldaScores.mean() * 100.0)))
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')

# Transform the predictions into color codes
maxQDA = yPredQDA.max()
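The Example 3 excerpt relies on names defined in the omitted portion (ldaMod, qdaMod, rfMod, cClasses, cValGood, titleStr, ldaScores, and the rotated features Xrr). The sketch below shows the core idea in a self-contained form, shading a mesh by QDA's predict_proba output; all data, grid limits, and styling are assumptions for illustration.
# Self-contained sketch: shade a 2-D mesh by QDA class probabilities.
# Data, grid limits, and styling below are illustrative assumptions.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(60, 2) + [-2, 0], rng.randn(60, 2) + [2, 0]])
y = np.array([0] * 60 + [1] * 60)
qda = QuadraticDiscriminantAnalysis().fit(X, y)

x1, x2 = np.meshgrid(np.arange(-6.0, 6.0, 0.1), np.arange(-6.0, 6.0, 0.1))
proba = qda.predict_proba(np.c_[x1.ravel(), x2.ravel()])[:, 1].reshape(x1.shape)

plt.imshow(proba, extent=[-6, 6, -6, 6], origin='lower', aspect='auto', cmap='coolwarm')
plt.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolors='k')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.title('QDA predict_proba over a mesh')
plt.show()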
Example 4: QuadraticDiscriminantAnalysis
# QDA model (kept in a variable named `lda` in the original source)
lda = QuadraticDiscriminantAnalysis()
lda.fit(comps, labels)
y_pred = lda.predict(comps)
print(labels)
print(y_pred)
mcc = matthews_corrcoef(labels, y_pred)
print("MCC=" + str(mcc))

# Plot the QDA decision boundary as the 0.5 probability contour
nx, ny = 200, 100
x_min, x_max = np.amin(comps[:, 0]), np.amax(comps[:, 0])
y_min, y_max = np.amin(comps[:, 1]), np.amax(comps[:, 1])
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.contour(xx, yy, Z, [0.5], linewidths=5, colors='k', linestyles='dashed')

# Plotting the class means
# plt.plot(lda.means_[0][0], lda.means_[0][1], 'o', color='black', markersize=10)
# plt.plot(lda.means_[1][0], lda.means_[1][1], 'o', color='black', markersize=10)
plt.title('PCA with QDA')

# Plot red and green data
output_red = comps[0:26]
output_green = comps[27:52]
# plt.scatter(output_red[:, 0], output_red[:, 1], color='r')
# plt.scatter(output_green[:, 0], output_green[:, 1], color='g')
plt.show()
Example 5: LDA
"""
What is the training misclassification rate?
"""
# LDA and QDA are assumed to be aliases for LinearDiscriminantAnalysis and
# QuadraticDiscriminantAnalysis imported earlier in the source file.
lda1 = LDA(solver="svd", store_covariance=True)
lda1.fit(warX, warY)

my_lda_pred = pd.DataFrame()
my_lda_pred["pred"] = ["No" if x == 0 else "Yes" for x in lda1.predict(warX)]
my_lda_pred["actual"] = ["No" if x == 0 else "Yes" for x in war["start"]]
conf_lda = pd.crosstab(my_lda_pred["pred"], my_lda_pred["actual"])
conf_lda

# Training misclassification rate = off-diagonal counts / total
(1 / (war.shape[0])) * (conf_lda.iloc[1, 0] + conf_lda.iloc[0, 1])
"""
6.69%
"""
# Recent scikit-learn versions spell the QDA parameter `store_covariance`;
# the plural `store_covariances` was deprecated and later removed.
qda1 = QDA(store_covariance=True)
qda1.fit(warX, warY)
test = qda1.predict_proba(warX)

my_qda_pred = pd.DataFrame()
my_qda_pred["pred"] = ["No" if x < .5 else "Yes" for x in qda1.predict(warX)]
my_qda_pred["actual"] = ["No" if x == 0 else "Yes" for x in war["start"]]
conf_qda = pd.crosstab(my_qda_pred["pred"], my_qda_pred["actual"])
conf_qda
(1 / (war.shape[0])) * (conf_qda.iloc[1, 0] + conf_qda.iloc[0, 1])
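The misclassification rate above is assembled by hand from the crosstab; assuming warY holds the same 0/1 outcome as war["start"], an equivalent check with sklearn.metrics would be:
# Equivalent computation with sklearn.metrics, reusing qda1 / warX / warY from above
# (assumed to be defined as in the example).
from sklearn.metrics import accuracy_score, confusion_matrix

qda_pred = qda1.predict(warX)
print(confusion_matrix(warY, qda_pred))
print("training misclassification rate:", 1.0 - accuracy_score(warY, qda_pred))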