This page collects typical usage examples of the Python method sklearn.gaussian_process.GaussianProcessClassifier.predict_proba. If you are unsure what GaussianProcessClassifier.predict_proba does or how to use it, the curated code samples below may help. You can also read further about the containing class, sklearn.gaussian_process.GaussianProcessClassifier.
Below are 9 code examples of GaussianProcessClassifier.predict_proba, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
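Before the collected examples, here is a minimal self-contained sketch of the method on toy data (this snippet is ours, not one of the examples below): a binary fit followed by predict_proba, which returns an array of shape (n_samples, n_classes) whose rows sum to 1.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])

# Fit a GPC with a scaled RBF kernel on four toy points.
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0)).fit(X, y)

proba = gpc.predict_proba(np.array([[1.5]]))
print(proba)  # shape (1, 2); the two class probabilities sum to 1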
Example 1: test_multi_class_n_jobs

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def test_multi_class_n_jobs(kernel):
    # Test that multi-class GPC produces identical results with n_jobs > 1.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    gpc_2.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    y_prob_2 = gpc_2.predict_proba(X2)
    assert_almost_equal(y_prob, y_prob_2)
Example 2: build_classifier_gp

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def build_classifier_gp(data, labels, **kwargs):
    # Linear kernel: (homogeneous DotProduct * constant scale) + constant bias.
    linear_kernel = Sum(k1=Product(k1=DotProduct(sigma_0=0, sigma_0_bounds='fixed'),
                                   k2=ConstantKernel()),
                        k2=ConstantKernel())
    gp_clf = GaussianProcessClassifier(kernel=linear_kernel)
    gp_clf.fit(data, labels)
    # Boolean mask selecting the probability column of the positive class.
    id_pos_class = gp_clf.classes_ == labels.max()
    return gp_clf, gp_clf.predict_proba(data)[:, id_pos_class]
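A hedged usage sketch for build_classifier_gp (the toy data below is our invention; the original caller is not shown, and the function additionally needs Sum, Product, DotProduct, and ConstantKernel imported from sklearn.gaussian_process.kernels in its defining module):

# Hypothetical usage with toy data (not part of the original example):
import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(40, 2)
labels = (data[:, 0] + data[:, 1] > 0).astype(int)

clf, pos_proba = build_classifier_gp(data, labels)
# pos_proba holds P(class == labels.max()) for every training sample.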
Example 3: test_predict_consistent

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def test_predict_consistent():
    """Check that the binary predict decision agrees with a predicted
    probability above 0.5."""
    for kernel in kernels:
        gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
        assert_array_equal(gpc.predict(X),
                           gpc.predict_proba(X)[:, 1] >= 0.5)
Example 4: test_multi_class

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def test_multi_class(kernel):
    # Test GPC for multi-class classification problems.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    # Class probabilities in each row must sum to one.
    assert_almost_equal(y_prob.sum(1), 1)

    y_pred = gpc.predict(X2)
    assert_array_equal(np.argmax(y_prob, 1), y_pred)
Example 5: C

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
y = np.array(g(X) > 0, dtype=int)

# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)

# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(-lim, lim, res),
                     np.linspace(-lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T

y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))

# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')

cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
# ... (listing truncated here in the source) ...
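The listing above breaks off inside the plt.imshow call. In scikit-learn's corresponding iso-probability example the call continues with an extent argument; a guessed completion, for orientation only:

# Assumed continuation (not in the truncated source):
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
                 extent=(-lim, lim, -lim, lim))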
Example 6: print

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
gp_opt.fit(X[:train_size], y[:train_size])

print("Log Marginal Likelihood (initial): %.3f"
      % gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
      % gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
      % (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
         accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
print("Log-loss: %.3f (initial) %.3f (optimized)"
      % (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
         log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))

# Plot posteriors
plt.figure(0)
plt.scatter(X[:train_size, 0], y[:train_size], c="k", label="Train data")
plt.scatter(X[train_size:, 0], y[train_size:], c="g", label="Test data")
X_ = np.linspace(0, 5, 100)
plt.plot(X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], "r",
         label="Initial kernel: %s" % gp_fix.kernel_)
plt.plot(X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], "b",
         label="Optimized kernel: %s" % gp_opt.kernel_)
plt.xlabel("Feature")
plt.ylabel("Class 1 probability")
plt.xlim(0, 5)
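The snippet starts after gp_fix and gp_opt have already been created. Judging by scikit-learn's plot_gpc example, they plausibly differ only in whether the kernel hyperparameters are optimized during fitting; a sketch under that assumption (X, y, and train_size as in the example above):

# Assumed setup (not shown in the listing above):
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
                                   optimizer=None)  # kernel kept fixed
gp_fix.fit(X[:train_size], y[:train_size])
gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))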
Example 7: RBF

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
                     np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0) ** 2]
for i, kernel in enumerate(kernels):
    clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)

    # plot the decision function for each datapoint on the grid
    Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
    Z = Z.reshape(xx.shape)

    plt.subplot(1, 2, i + 1)
    image = plt.imshow(Z, interpolation='nearest',
                       extent=(xx.min(), xx.max(), yy.min(), yy.max()),
                       aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    # Z holds class-1 probabilities, so the decision boundary sits at 0.5.
    contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
                           linestyles='--')
    plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
    plt.xticks(())
    plt.yticks(())
    plt.axis([-3, 3, -3, 3])
    plt.colorbar(image)
    plt.title("%s\n Log-Marginal-Likelihood:%.3f"
              % (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
# ... (listing truncated here in the source) ...
Example 8: trainPredict

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def trainPredict(subjectid, makeplot=False):
    print("testing participant " + subjectid)
    # Load training data from the file matlab generates
    traindata = np.genfromtxt('csvdata/' + subjectid +
                              '_sim.csv', delimiter=',',
                              missing_values=['NaN', 'nan'],
                              filling_values=None)
    # Clean + downsample this data
    trainx, trainy = cleandata(traindata, downsamplefactor=20)

    # Train a Gaussian Process
    anisokern = kernels.RBF()  # default kernel
    gp = GaussianProcessClassifier(kernel=anisokern)  # Initialize the GPC
    gp.fit(trainx, trainy)  # train this class on the data
    trainx = trainy = None  # Discard all training data to preserve memory

    # load test data
    testdata = np.genfromtxt('csvdata/' + subjectid +
                             '_rival.csv', delimiter=',',
                             missing_values=['NaN', 'nan'],
                             filling_values=None)
    testx, testy = cleandata(testdata, downsamplefactor=4)  # clean data
    testdata = None  # clear from memory

    # work out percentage in percept for each data point:
    percentages, nextpercept = assign_percentage(testy)

    # get a prediction for all points in the test data:
    predicty = gp.predict(testx)
    proby = gp.predict_proba(testx)

    if makeplot:
        summaryplot(subjectid, testx, testy, predicty, proby, gp)

    # Summarise prediction by reported percept
    meanprediction = {'mean' + percept:
                      proby[testy == value, 1].mean()
                      for percept, value in perceptindices.items()}
    predictiondev = {'stdev' + percept:
                     proby[testy == value, 1].std()
                     for percept, value in perceptindices.items()}
    predictionaccuracy = {'acc' + percept:
                          (predicty[testy == value] ==
                           testy[testy == value]).mean()
                          for percept, value in perceptindices.items()}
    # Summarise prediction by percentage in percept
    predictioncourse = {'timecourse' + percept + str(cutoff):
                        proby[(testy == value) &
                              (percentages < cutoff) &
                              (percentages > cutoff - 0.1), 1].mean()
                        for percept, value in perceptindices.items()
                        for cutoff in np.linspace(0.1, 1, 10)}

    # Summarise mixed percept time courses by the next percept
    nextcourse = {'nextcourse' + percept + str(cutoff):
                  proby[(testy == 0) &
                        (percentages < cutoff) &
                        (percentages > cutoff - 0.1) &
                        (nextpercept == perceptindices[percept]), 1].mean()
                  for percept in ['highfreq', 'lowfreq']
                  for cutoff in np.linspace(0.1, 1, 10)}
    afterdominant = {'after' + percept + "_" + after + "_" + str(cutoff):
                     proby[(testy == perceptindices[percept]) &
                           (percentages < cutoff) &
                           (percentages > cutoff - 0.1) &
                           (nextpercept == perceptindices[after]), 1].mean()
                     for percept, after in [('highfreq', 'mixed'),
                                            ('highfreq', 'lowfreq'),
                                            ('lowfreq', 'mixed'),
                                            ('lowfreq', 'highfreq')]
                     for cutoff in np.linspace(0.1, 1, 10)}

    # Only return the summarised data
    return meanprediction, predictiondev, predictionaccuracy, \
        predictioncourse, nextcourse, afterdominant
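trainPredict depends on helpers from its surrounding module (cleandata, assign_percentage, summaryplot, perceptindices) and on per-subject CSV files; a hypothetical driver, with the subject id made up for illustration:

# Hypothetical driver (subject id invented; helper functions must be importable):
if __name__ == '__main__':
    summaries = trainPredict('subject01', makeplot=False)
    for summary in summaries:
        print(summary)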
Example 9: plot

# Required import: from sklearn.gaussian_process import GaussianProcessClassifier [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessClassifier import predict_proba [as alias]
def plot(df, options):
    UNIQ_GROUPS = df.group.unique()
    UNIQ_GROUPS.sort()

    sns.set_style("white")
    grppal = sns.color_palette("Set2", len(UNIQ_GROUPS))

    print('# UNIQ GROUPS', UNIQ_GROUPS)

    cent_stats = df.groupby(
        ['position', 'group', 'side']).apply(stats_per_group)
    cent_stats.reset_index(inplace=True)

    import time
    from sklearn import preprocessing
    from sklearn.gaussian_process import GaussianProcessRegressor, GaussianProcessClassifier
    from sklearn.gaussian_process.kernels import Matern, WhiteKernel, ExpSineSquared, ConstantKernel, RBF

    ctlDF = cent_stats[cent_stats['group'] == 0]

    TNRightDF = cent_stats[cent_stats['group'] != 0]
    TNRightDF = TNRightDF[TNRightDF['side'] == 'right']

    dataDf = pd.concat([ctlDF, TNRightDF], ignore_index=True)
    print(dataDf)

    yDf = dataDf['group'] == 0
    yDf = yDf.astype(int)
    y = yDf.values
    print(y)
    print(y.shape)

    XDf = dataDf[['position', 'values']]
    X = XDf.values
    X = preprocessing.scale(X)
    print(X)
    print(X.shape)

    # kernel = ConstantKernel() + Matern(length_scale=mean, nu=3 / 2) + \
    #     WhiteKernel(noise_level=1e-10)
    kernel = 1**2 * Matern(length_scale=1, nu=1.5) + \
        WhiteKernel(noise_level=0.1)

    figure = plt.figure(figsize=(10, 6))

    stime = time.time()
    gp = GaussianProcessClassifier(kernel)
    gp.fit(X, y)
    print(gp.kernel_)
    print(gp.log_marginal_likelihood())
    print("Time for GPC fitting: %.3f" % (time.time() - stime))

    # create a mesh to plot in
    h = 0.1
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    plt.figure(figsize=(10, 5))

    # Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max] x [y_min, y_max].
    Z = gp.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1]
    print(Z)
    print(Z.shape)

    # Put the result into a color plot
    Z = Z.reshape((xx.shape[0], xx.shape[1]))
    print(Z.shape)
    plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g"])[y])
    plt.xlabel('position')
    plt.ylabel('normalized val')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title("%s, LML: %.3f" %
              ("TN vs. Control", gp.log_marginal_likelihood(gp.kernel_.theta)))
    plt.tight_layout()

    if options.title:
        plt.suptitle(options.title)

    if options.output:
        plt.savefig(options.output, dpi=150)
# ... (rest of the code omitted here) ...