This article collects typical usage examples of the Python class sklearn.gaussian_process.GaussianProcessClassifier. If you are unsure what GaussianProcessClassifier does or how to use it, the curated class code examples below may help.
Fifteen code examples of the GaussianProcessClassifier class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
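Most of the short test snippets below (Examples 1, 2, 4–9, 11, 12) are excerpted from scikit-learn's own GPC test suite and assume module-level fixtures (X, X2, y, y_mc, kernels, fixed_kernel) and imports that the excerpts omit. The following is a minimal sketch of that shared setup so the snippets can run standalone; the exact fixture values are an assumption reconstructed from the test file, not part of the excerpts:
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def assert_greater(a, b):
    # shim for the deprecated sklearn.utils.testing helper used below
    assert a > b

# 1-D toy problem: the label is the sign of sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(np.sin(X).ravel() > 0, dtype=int)

# three-class variant, binned on sin(x)
fX = np.sin(X).ravel()
y_mc = np.empty(y.shape, dtype=int)
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2

fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
           RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
           C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0,
                                     length_scale_bounds=(1e-3, 1e3))]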
Example 1: test_lml_improving
def test_lml_improving():
    """Test that hyperparameter tuning improves the log-marginal likelihood."""
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
        assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                       gpc.log_marginal_likelihood(kernel.theta))
Example 2: test_predict_consistent
def test_predict_consistent():
    """Check that binary predictions agree with a predicted probability above 0.5."""
    for kernel in kernels:
        gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
        assert_array_equal(gpc.predict(X),
                           gpc.predict_proba(X)[:, 1] >= 0.5)
Example 3: build_classifier_gp
def build_classifier_gp(data, labels, **kwargs):
    # requires: from sklearn.gaussian_process.kernels import (
    #     ConstantKernel, DotProduct, Product, Sum)
    linear_kernel = Sum(k1=Product(k1=DotProduct(sigma_0=0, sigma_0_bounds='fixed'),
                                   k2=ConstantKernel()),
                        k2=ConstantKernel())
    gp_clf = GaussianProcessClassifier(kernel=linear_kernel)
    gp_clf.fit(data, labels)
    # boolean column mask selecting the positive (max-label) class
    id_pos_class = gp_clf.classes_ == labels.max()
    return gp_clf, gp_clf.predict_proba(data)[:, id_pos_class]
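A hypothetical invocation of build_classifier_gp on synthetic data (the data and labels here are made up purely for illustration, and numpy is assumed imported as np):
rng = np.random.RandomState(0)
data = rng.randn(60, 2)
labels = (data[:, 0] + data[:, 1] > 0).astype(int)
gp_clf, pos_proba = build_classifier_gp(data, labels)
print(pos_proba[:5])  # predicted probability of the positive class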
Example 4: test_converged_to_local_maximum
def test_converged_to_local_maximum(kernel):
    # Test that we are at a local maximum after hyperparameter optimization.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = \
        gpc.log_marginal_likelihood(gpc.kernel_.theta, True)

    assert np.all((np.abs(lml_gradient) < 1e-4) |
                  (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
                  (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]))
Example 5: test_multi_class
def test_multi_class(kernel):
    # Test GPC for multi-class classification problems.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    assert_almost_equal(y_prob.sum(1), 1)

    y_pred = gpc.predict(X2)
    assert_array_equal(np.argmax(y_prob, 1), y_pred)
Example 6: test_lml_gradient
def test_lml_gradient(kernel):
    # Compare analytic and numeric gradient of the log marginal likelihood.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
    lml_gradient_approx = \
        approx_fprime(kernel.theta,
                      lambda theta: gpc.log_marginal_likelihood(theta, False),
                      1e-10)

    assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
Example 7: test_multi_class_n_jobs
def test_multi_class_n_jobs(kernel):
    # Test that multi-class GPC produces identical results with n_jobs > 1.
    gpc = GaussianProcessClassifier(kernel=kernel)
    gpc.fit(X, y_mc)

    gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    gpc_2.fit(X, y_mc)

    y_prob = gpc.predict_proba(X2)
    y_prob_2 = gpc_2.predict_proba(X2)
    assert_almost_equal(y_prob, y_prob_2)
Example 8: test_random_starts
def test_random_starts():
    # Test that an increasing number of random starts of the GP fitting only
    # increases the log marginal likelihood of the chosen theta.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0

    kernel = C(1.0, (1e-2, 1e2)) \
        * RBF(length_scale=[1e-3] * n_features,
              length_scale_bounds=[(1e-4, 1e+2)] * n_features)
    last_lml = -np.inf
    for n_restarts_optimizer in range(5):
        gp = GaussianProcessClassifier(
            kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
            random_state=0).fit(X, y)
        lml = gp.log_marginal_likelihood(gp.kernel_.theta)
        assert_greater(lml, last_lml - np.finfo(np.float32).eps)
        last_lml = lml
Example 9: test_custom_optimizer
def test_custom_optimizer(kernel):
    # Test that GPC can use externally defined optimizers.
    # Define a dummy optimizer that simply tests 50 random hyperparameters.
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = \
            initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
    gpc.fit(X, y_mc)
    # Check that the custom optimizer improved the marginal likelihood.
    assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                   gpc.log_marginal_likelihood(kernel.theta))
Example 10: trainModel
def trainModel(subjectid):
    # assumes: from sklearn.gaussian_process import kernels
    # Load training data from the file MATLAB generates
    traindata = np.genfromtxt('csvdata/' + subjectid + '_sim.csv',
                              delimiter=',',
                              missing_values=['NaN', 'nan'],
                              filling_values=None)
    trainx, trainy = cleandata(traindata, downsamplefactor=20)

    # Train a Gaussian process classifier
    anisokern = kernels.RBF()  # default kernel
    gp = GaussianProcessClassifier(kernel=anisokern)  # initialize the GPC
    gp.fit(trainx, trainy)  # train on the data
    trainx = trainy = None  # discard training data to conserve memory

    # Load test data
    testdata = np.genfromtxt('csvdata/' + subjectid + '_rival.csv',
                             delimiter=',',
                             missing_values=['NaN', 'nan'],
                             filling_values=None)
    testx, testy = cleandata(testdata, downsamplefactor=4)  # clean data
    return gp, testx, testy
Example 11: test_lml_precomputed
def test_lml_precomputed():
    # Test that the LML of the optimized kernel is stored correctly.
    for kernel in kernels:
        gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
        assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                            gpc.log_marginal_likelihood(), 7)
Example 12: test_lml_improving
def test_lml_improving(kernel):
    # Test that hyperparameter tuning improves the log-marginal likelihood.
    gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                   gpc.log_marginal_likelihood(kernel.theta))
Example 13: GaussianProcessClassifier
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, log_loss
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

# Generate data
train_size = 50
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 100)[:, np.newaxis]
y = np.array(X[:, 0] > 2.5, dtype=int)

# Specify Gaussian processes with fixed and optimized hyperparameters
gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
                                   optimizer=None)
gp_fix.fit(X[:train_size], y[:train_size])

gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gp_opt.fit(X[:train_size], y[:train_size])

print("Log Marginal Likelihood (initial): %.3f"
      % gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta))
print("Log Marginal Likelihood (optimized): %.3f"
      % gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta))
print("Accuracy: %.3f (initial) %.3f (optimized)"
      % (accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])),
         accuracy_score(y[:train_size], gp_opt.predict(X[:train_size]))))
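The excerpt imports log_loss but never calls it; in the full scikit-learn example the fixed and optimized models are also compared on log-loss, roughly as follows (a sketch, not part of the excerpt):
print("Log-loss: %.3f (initial) %.3f (optimized)"
      % (log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]),
         log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1])))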
Example 14: RBF
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct

xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
                     np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)

# Fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
for i, kernel in enumerate(kernels):
    clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)

    # Plot the predicted probability for each point on the grid
    Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
    Z = Z.reshape(xx.shape)

    plt.subplot(1, 2, i + 1)
    image = plt.imshow(Z, interpolation='nearest',
                       extent=(xx.min(), xx.max(), yy.min(), yy.max()),
                       aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
    # draw the p = 0.5 decision boundary
    contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2,
                           linestyles='--')
    plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
    plt.xticks(())
    plt.yticks(())
    plt.axis([-3, 3, -3, 3])
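When run as a script, nothing appears until the figure is shown; a typical closing (assumed, not part of the excerpt):
plt.tight_layout()
plt.show()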
Example 15: trainPredict
def trainPredict(subjectid, makeplot=False):
    # assumes: from sklearn.gaussian_process import kernels, plus helpers
    # cleandata, assign_percentage, summaryplot and dict perceptindices
    # defined elsewhere in the project
    print("testing participant " + subjectid)
    # Load training data from the file MATLAB generates
    traindata = np.genfromtxt('csvdata/' + subjectid + '_sim.csv',
                              delimiter=',',
                              missing_values=['NaN', 'nan'],
                              filling_values=None)
    # Clean and downsample this data
    trainx, trainy = cleandata(traindata, downsamplefactor=20)

    # Train a Gaussian process classifier
    anisokern = kernels.RBF()  # default kernel
    gp = GaussianProcessClassifier(kernel=anisokern)  # initialize the GPC
    gp.fit(trainx, trainy)  # train on the data
    trainx = trainy = None  # discard training data to conserve memory

    # Load test data
    testdata = np.genfromtxt('csvdata/' + subjectid + '_rival.csv',
                             delimiter=',',
                             missing_values=['NaN', 'nan'],
                             filling_values=None)
    testx, testy = cleandata(testdata, downsamplefactor=4)  # clean data
    testdata = None  # clear from memory

    # Work out the percentage into the percept for each data point
    percentages, nextpercept = assign_percentage(testy)

    # Get a prediction for all points in the test data
    predicty = gp.predict(testx)
    proby = gp.predict_proba(testx)

    if makeplot:
        summaryplot(subjectid, testx, testy, predicty, proby, gp)

    # Summarise the prediction by reported percept
    meanprediction = {'mean' + percept:
                      proby[testy == value, 1].mean()
                      for percept, value in perceptindices.items()}
    predictiondev = {'stdev' + percept:
                     proby[testy == value, 1].std()
                     for percept, value in perceptindices.items()}
    predictionaccuracy = {'acc' + percept:
                          (predicty[testy == value] ==
                           testy[testy == value]).mean()
                          for percept, value in perceptindices.items()}
    # Summarise the prediction by percentage into the percept
    predictioncourse = {'timecourse' + percept + str(cutoff):
                        proby[(testy == value) &
                              (percentages < cutoff) &
                              (percentages > cutoff - 0.1), 1].mean()
                        for percept, value in perceptindices.items()
                        for cutoff in np.linspace(0.1, 1, 10)}
    # Summarise mixed-percept time courses by the next percept
    nextcourse = {'nextcourse' + percept + str(cutoff):
                  proby[(testy == 0) &
                        (percentages < cutoff) &
                        (percentages > cutoff - 0.1) &
                        (nextpercept == perceptindices[percept]), 1].mean()
                  for percept in ['highfreq', 'lowfreq']
                  for cutoff in np.linspace(0.1, 1, 10)}
    afterdominant = {'after' + percept + "_" + after + "_" + str(cutoff):
                     proby[(testy == perceptindices[percept]) &
                           (percentages < cutoff) &
                           (percentages > cutoff - 0.1) &
                           (nextpercept == perceptindices[after]), 1].mean()
                     for percept, after in [('highfreq', 'mixed'),
                                            ('highfreq', 'lowfreq'),
                                            ('lowfreq', 'mixed'),
                                            ('lowfreq', 'highfreq')]
                     for cutoff in np.linspace(0.1, 1, 10)}
    # Only return the summarised data
    return (meanprediction, predictiondev, predictionaccuracy,
            predictioncourse, nextcourse, afterdominant)