This page collects typical usage examples of the Python method sklearn.gaussian_process.GaussianProcessRegressor.sample_y. If you are unsure what GaussianProcessRegressor.sample_y does or how to use it, the curated examples below may help. You can also explore further usage of its containing class, sklearn.gaussian_process.GaussianProcessRegressor.
Below, 5 code examples of GaussianProcessRegressor.sample_y are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
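Before diving into the examples, here is a minimal, self-contained sketch of what sample_y does: given a fitted GaussianProcessRegressor, it draws sample functions from the GP evaluated at the query points. The toy data below is illustrative only and is not taken from any of the examples that follow.

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# Toy training data (illustrative only)
X_train = np.array([[1.0], [3.0], [5.0], [7.0]])
y_train = np.sin(X_train).ravel()

gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0)).fit(X_train, y_train)

# Draw 5 posterior samples at 50 query points; result has shape (50, 5)
X_query = np.linspace(0.0, 8.0, 50)[:, np.newaxis]
samples = gpr.sample_y(X_query, n_samples=5, random_state=0)
print(samples.shape)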
Example 1: test_y_multioutput
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import sample_y [as alias]
def test_y_multioutput():
    """Test that GPR can deal with multi-dimensional target values."""
    y_2d = np.vstack((y, y * 2)).T

    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)

    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                   normalize_y=False)
    gpr.fit(X, y)

    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
                                      normalize_y=False)
    gpr_2d.fit(X, y_2d)

    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)

    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)

    # Standard deviation and covariance do not depend on output
    assert_almost_equal(y_std_1d, y_std_2d)
    assert_almost_equal(y_cov_1d, y_cov_2d)

    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])

    # Test hyperparameter optimization
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)

        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)

        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
Example 2: test_sample_statistics
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import sample_y [as alias]
def test_sample_statistics():
    """Test that statistics of samples drawn from GP are correct."""
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)

        y_mean, y_cov = gpr.predict(X2, return_cov=True)
        samples = gpr.sample_y(X2, 300000)

        # More digits accuracy would require many more samples
        assert_almost_equal(y_mean, np.mean(samples, 1), 2)
        assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
                            np.var(samples, 1) / np.diag(y_cov).max(), 1)
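Examples 1 and 2 come from scikit-learn's own test suite and refer to module-level fixtures (X, y, X2, kernels) that the snippets do not show. A plausible reconstruction, modeled on scikit-learn's GPR test file (the exact values and kernel list there may differ), is:

import numpy as np
from numpy.testing import assert_almost_equal
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def f(x):
    return x * np.sin(x)

# Training inputs/targets and held-out query points (assumed values)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T

# A small set of kernels to iterate over (assumed)
kernels = [RBF(length_scale=1.0),
           C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0)]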
Example 3: SmoothFunctionCreator
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import sample_y [as alias]
import numpy as np
from scipy.ndimage import gaussian_filter1d
from sklearn.gaussian_process import GaussianProcessRegressor

class SmoothFunctionCreator():

    def __init__(self, seed=42):
        self._gp = GaussianProcessRegressor()
        x_train = np.array([0.0, 2.0, 6.0, 10.0])[:, np.newaxis]
        source_train = np.array([0.0, 1.0, -1.0, 0.0])
        self._gp.fit(x_train, source_train)
        self._random_state = np.random.RandomState(seed)

    def sample(self, n_samples):
        x = np.linspace(0.0, 10.0, 100)[:, np.newaxis]
        source = self._gp.sample_y(x, n_samples, random_state=self._random_state)
        # Smoothed first derivative of the sampled curves, squashed into (-1, 1)
        target = gaussian_filter1d(source, 1, order=1, axis=0)
        target = np.tanh(10.0 * target)
        return source, target
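A usage sketch (mine, not from the source): each call to sample returns aligned source and target curves as columns of two (100, n_samples) arrays.

creator = SmoothFunctionCreator(seed=0)
source, target = creator.sample(n_samples=3)
print(source.shape, target.shape)  # (100, 3) (100, 3)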
Example 4: f
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import sample_y [as alias]
plt.plot(x, y_pred, 'b-', label='Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
         np.concatenate([y_pred - 1.9600 * sigma,
                         (y_pred + 1.9600 * sigma)[::-1]]),
         alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')

# Second figure: 10 functions sampled from the fitted GP
fig = plt.figure()
y_samples = gp.sample_y(x, 10)
plt.plot(x, y_samples, lw=1)
plt.plot(x, f(x), 'r:', label=r'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label='Observations')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
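The fragment above assumes a GP already fitted to observations of f(x) = x * sin(x), plus a dense grid x, predictions y_pred, and predictive standard deviations sigma; it closely matches scikit-learn's classic x·sin(x) regression example. A minimal setup that would make it runnable (kernel choice and grid are assumptions) looks like:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def f(x):
    return x * np.sin(x)

# Observations and a dense prediction grid
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = f(X).ravel()
x = np.atleast_2d(np.linspace(0, 10, 1000)).T

# Assumed kernel; any smooth stationary kernel would do for the plot
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9).fit(X, y)
y_pred, sigma = gp.predict(x, return_std=True)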
Example 5: range
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import sample_y [as alias]
# Visualization of prior
pylab.figure(0, figsize=(10, 8))
X_nn = gp.kernel.k2._project_manifold(X_[:, None])
pylab.subplot(3, 2, 1)
for i in range(X_nn.shape[1]):
    pylab.plot(X_, X_nn[:, i], label="Manifold-dim %d" % i)
pylab.legend(loc="best")
pylab.xlim(-7.5, 7.5)
pylab.title("Prior mapping to manifold")
pylab.subplot(3, 2, 2)
y_mean, y_std = gp.predict(X_[:, None], return_std=True)
pylab.plot(X_, y_mean, 'k', lw=3, zorder=9, label="mean")
pylab.fill_between(X_, y_mean - y_std, y_mean + y_std,
alpha=0.5, color='k')
y_samples = gp.sample_y(X_[:, None], 10)
pylab.plot(X_, y_samples, color='b', lw=1)
pylab.plot(X_, y_samples[:, 0], color='b', lw=1, label="samples") # just for the legend
pylab.legend(loc="best")
pylab.xlim(-7.5, 7.5)
pylab.ylim(-4, 3)
pylab.title("Prior samples")
# Generate data and fit GP
X = np.random.uniform(-5, 5, 40)[:, None]
y = np.sin(X[:, 0]) + (X[:, 0] > 0)
gp.fit(X, y)
# Visualization of posterior
X_nn = gp.kernel_.k2._project_manifold(X_[:, None])
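Example 5 appears to rely on a non-standard manifold kernel (the private _project_manifold call suggests the ManifoldKernel from the gp_extras package), so it will not run against stock scikit-learn. The prior-versus-posterior sampling pattern it illustrates works with any kernel, though: calling sample_y on a not-yet-fitted GaussianProcessRegressor draws from the GP prior. A sketch with a plain RBF kernel (kernel and data are assumptions):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

X_ = np.linspace(-7.5, 7.5, 200)
gp = GaussianProcessRegressor(kernel=RBF(length_scale=1.0))

# Before fit: sample_y draws from the GP prior
prior_samples = gp.sample_y(X_[:, None], 10, random_state=0)

# After fit: the same call draws from the posterior
X = np.random.RandomState(1).uniform(-5, 5, 40)[:, None]
y = np.sin(X[:, 0]) + (X[:, 0] > 0)
gp.fit(X, y)
posterior_samples = gp.sample_y(X_[:, None], 10, random_state=0)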