This article collects typical usage examples of the GaussianProcessRegressor class from Python's sklearn.gaussian_process module. If you have been wondering what GaussianProcessRegressor is for or how to use it in practice, the hand-picked examples below should help.
The following 15 code examples of the GaussianProcessRegressor class are shown, ordered by popularity.
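Most of the excerpts below come from test suites and rely on module-level fixtures (X, y, X2, kernels, fixed_kernel) and imports that the excerpts omit. A minimal setup sketch of what they plausibly look like (the exact fixture values are assumptions, not the originals; individual examples may shadow these names):

import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
    RBF, Matern, WhiteKernel, ConstantKernel as C)

# Hypothetical stand-ins for the fixtures the test excerpts rely on.
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T    # training inputs
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T    # held-out test inputs
y = (X * np.sin(X)).ravel()                      # training targets
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
           C(1.0, (1e-3, 1e3)) * RBF(length_scale=1.0)]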
Example 1: get_globals
def get_globals():
    X = np.array([
        [0.00, 0.00],
        [0.99, 0.99],
        [0.00, 0.99],
        [0.99, 0.00],
        [0.50, 0.50],
        [0.25, 0.50],
        [0.50, 0.25],
        [0.75, 0.50],
        [0.50, 0.75],
    ])

    def get_y(X):
        return -(X[:, 0] - 0.3) ** 2 - 0.5 * (X[:, 1] - 0.6) ** 2 + 2
    y = get_y(X)

    mesh = np.dstack(
        np.meshgrid(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
    ).reshape(-1, 2)

    GP = GaussianProcessRegressor(
        kernel=Matern(),
        n_restarts_optimizer=25,
    )
    GP.fit(X, y)

    return {'x': X, 'y': y, 'gp': GP, 'mesh': mesh}
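A hypothetical usage sketch for the returned dictionary (assuming the setup imports above): evaluate the fitted surrogate over the pre-built mesh and pick the most promising grid point.

globals_ = get_globals()
mu, std = globals_['gp'].predict(globals_['mesh'], return_std=True)  # posterior mean and std over the grid
best_point = globals_['mesh'][np.argmax(mu)]                         # grid point with the highest predicted value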
Example 2: test_predict_cov_vs_std
def test_predict_cov_vs_std():
    """Test that the predicted std.-dev. is consistent with the covariance's diagonal."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        y_mean, y_std = gpr.predict(X2, return_std=True)
        assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
Example 3: test_y_normalization
def test_y_normalization():
    """Test normalization of the target values in GP.

    Fitting a non-normalizing GP on normalized y and fitting a normalizing GP
    on unnormalized y should yield identical results.
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)

        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)

        # Compare predicted means, std.-devs. and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        y_pred = y_mean + y_pred
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)

        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
Example 4: test_gpr_interpolation
def test_gpr_interpolation(kernel):
    # Test the interpolating property for different kernels.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    y_pred, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_pred, y)
    assert_almost_equal(np.diag(y_cov), 0.)
Example 5: test_lml_improving
def test_lml_improving():
    """Test that hyperparameter tuning improves the log-marginal likelihood."""
    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(kernel.theta))
Example 6: bo_
def bo_(x_obs, y_obs):
    kernel = kernels.Matern() + kernels.WhiteKernel()
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=16)
    gp.fit(x_obs, y_obs)

    xs = list(repeat(np.atleast_2d(np.linspace(0, 10, 128)).T, 2))
    x = cartesian_product(*xs)

    a = a_EI(gp, x_obs=x_obs, y_obs=y_obs)
    argmin_a_x = x[np.argmax(a(x))]

    # heavy evaluation
    print("f({})".format(argmin_a_x))
    f_argmin_a_x = f2d(np.atleast_2d(argmin_a_x))

    plot_2d(gp, x_obs, y_obs, argmin_a_x, a, xs)
    plt.show()

    bo_(
        x_obs=np.vstack((x_obs, argmin_a_x)),
        y_obs=np.hstack((y_obs, f_argmin_a_x)),
    )
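The helpers a_EI, cartesian_product, f2d and plot_2d are defined elsewhere in the source project and are not shown here. As a rough illustration only, an expected-improvement acquisition with the same call signature might look like this (an assumption, not the project's actual code):

from scipy.stats import norm

def a_EI(gp, x_obs, y_obs, xi=0.01):
    # Hypothetical expected-improvement acquisition (maximization).
    y_best = np.max(y_obs)                    # best observation so far
    def a(x):
        mu, std = gp.predict(x, return_std=True)
        std = np.maximum(std, 1e-12)          # guard against zero predictive std
        z = (mu - y_best - xi) / std
        return (mu - y_best - xi) * norm.cdf(z) + std * norm.pdf(z)
    return a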
Example 7: test_gpr_interpolation
def test_gpr_interpolation():
    """Test the interpolating property for different kernels."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
        y_pred, y_cov = gpr.predict(X, return_cov=True)
        assert_true(np.allclose(y_pred, y))
        assert_true(np.allclose(np.diag(y_cov), 0.))
Example 8: test_acquisition_api
def test_acquisition_api():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    y = rng.randn(10)
    gpr = GaussianProcessRegressor()
    gpr.fit(X, y)

    for method in [gaussian_ei, gaussian_lcb, gaussian_pi]:
        assert_array_equal(method(X, gpr).shape, 10)
        assert_raises(ValueError, method, rng.rand(10), gpr)
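The acquisition functions gaussian_ei, gaussian_lcb and gaussian_pi used here most likely come from scikit-optimize; under that assumption, the missing import would be:

from skopt.acquisition import gaussian_ei, gaussian_lcb, gaussian_pi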
Example 9: test_converged_to_local_maximum
def test_converged_to_local_maximum(kernel):
    # Test that we are in a local maximum after hyperparameter optimization.
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

    lml, lml_gradient = gpr.log_marginal_likelihood(gpr.kernel_.theta, True)

    assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
                       (gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
                       (gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
Example 10: test_lml_gradient
def test_lml_gradient():
    """Compare the analytic and numeric gradients of the log-marginal likelihood."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

        lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
        lml_gradient_approx = approx_fprime(
            kernel.theta,
            lambda theta: gpr.log_marginal_likelihood(theta, False),
            1e-10)

        assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
Example 11: test_sample_statistics
def test_sample_statistics():
    """Test that statistics of samples drawn from the GP are correct."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        samples = gpr.sample_y(X2, 300000)

        # More digits of accuracy would require many more samples
        assert_almost_equal(y_mean, np.mean(samples, 1), 2)
        assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                            np.var(samples, 1) / np.diag(y_cov).max(), 1)
Example 12: test_prior
def test_prior(kernel):
    # Test that the GP prior has mean 0 and identical variances.
    gpr = GaussianProcessRegressor(kernel=kernel)

    y_mean, y_cov = gpr.predict(X, return_cov=True)

    assert_almost_equal(y_mean, 0, 5)
    if len(gpr.kernel.theta) > 1:
        # XXX: quite hacky, works only for current kernels
        assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(y_cov), 1, 5)
Example 13: test_no_fit_default_predict
def test_no_fit_default_predict():
    # Test that GPR predictions without fit do not break by default.
    default_kernel = (C(1.0, constant_value_bounds="fixed") *
                      RBF(1.0, length_scale_bounds="fixed"))
    gpr1 = GaussianProcessRegressor()
    _, y_std1 = gpr1.predict(X, return_std=True)
    _, y_cov1 = gpr1.predict(X, return_cov=True)

    gpr2 = GaussianProcessRegressor(kernel=default_kernel)
    _, y_std2 = gpr2.predict(X, return_std=True)
    _, y_cov2 = gpr2.predict(X, return_cov=True)

    assert_array_almost_equal(y_std1, y_std2)
    assert_array_almost_equal(y_cov1, y_cov2)
Example 14: SmoothFunctionCreator
class SmoothFunctionCreator:
    def __init__(self, seed=42):
        self._gp = GaussianProcessRegressor()
        x_train = np.array([0.0, 2.0, 6.0, 10.0])[:, np.newaxis]
        source_train = np.array([0.0, 1.0, -1.0, 0.0])
        self._gp.fit(x_train, source_train)
        self._random_state = np.random.RandomState(seed)

    def sample(self, n_samples):
        x = np.linspace(0.0, 10.0, 100)[:, np.newaxis]
        source = self._gp.sample_y(x, n_samples, random_state=self._random_state)
        target = gaussian_filter1d(source, 1, order=1, axis=0)
        target = np.tanh(10.0 * target)
        return source, target
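A brief usage sketch (assuming numpy, scipy.ndimage's gaussian_filter1d and GaussianProcessRegressor are imported as in the source project); both returned arrays hold one sampled function per column:

creator = SmoothFunctionCreator(seed=0)
source, target = creator.sample(n_samples=5)   # each has shape (100, 5)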
Example 15: fit_GP
def fit_GP(x_train):
    y_train = gaussian(x_train, mu, sig).ravel()

    # Instantiate a Gaussian process model
    kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2))
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

    # Fit to the data using maximum-likelihood estimation of the parameters
    gp.fit(x_train, y_train)

    # Make the prediction on the meshed x-axis (ask for the std.-dev. as well)
    y_pred, sigma = gp.predict(x, return_std=True)
    return y_train, y_pred, sigma
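fit_GP relies on module-level names gaussian, mu, sig and x that the excerpt omits; a plausible reconstruction (the values are hypothetical):

mu, sig = 0.5, 0.1                             # hypothetical center and width of the target bump
x = np.atleast_2d(np.linspace(0, 1, 1000)).T   # hypothetical prediction grid

def gaussian(x, mu, sig):
    # Unnormalized Gaussian bump used as the target function.
    return np.exp(-(x - mu) ** 2 / (2 * sig ** 2))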