This article collects typical usage examples of the Python method sklearn.gaussian_process.GaussianProcessRegressor.fit. If you have been wondering what GaussianProcessRegressor.fit does, how to use it, or where to find examples of it, the curated samples below may help. You can also read more about the containing class, sklearn.gaussian_process.GaussianProcessRegressor.
The 15 code examples of GaussianProcessRegressor.fit below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
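Before the examples, here is a minimal, self-contained illustration of the method on synthetic data (the toy problem and kernel choice are illustrative, not taken from any example below):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF

# Toy 1-D regression problem
X = np.linspace(0, 10, 20)[:, np.newaxis]
y = np.sin(X).ravel()

gpr = GaussianProcessRegressor(kernel=RBF(length_scale=1.0))
gpr.fit(X, y)  # hyperparameters are tuned by maximizing the log-marginal likelihood
y_mean, y_std = gpr.predict(X, return_std=True)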
Example 1: test_y_normalization
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_y_normalization():
    """Test normalization of the target values in GP.

    Fitting a non-normalizing GP on normalized y and fitting a normalizing GP
    on unnormalized y should yield identical results.
    """
    y_mean = y.mean(0)
    y_norm = y - y_mean
    for kernel in kernels:
        # Fit non-normalizing GP on normalized y
        gpr = GaussianProcessRegressor(kernel=kernel)
        gpr.fit(X, y_norm)
        # Fit normalizing GP on unnormalized y
        gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_norm.fit(X, y)

        # Compare predicted means, standard deviations, and covariances
        y_pred, y_pred_std = gpr.predict(X2, return_std=True)
        y_pred = y_mean + y_pred
        y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
        assert_almost_equal(y_pred, y_pred_norm)
        assert_almost_equal(y_pred_std, y_pred_std_norm)

        _, y_cov = gpr.predict(X2, return_cov=True)
        _, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
        assert_almost_equal(y_cov, y_cov_norm)
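This test relies on module-level fixtures (X, X2, y, kernels) defined elsewhere in scikit-learn's test suite; a plausible minimal reconstruction so the snippet can run standalone (the exact definitions in the real test file may differ):

import numpy as np
from numpy.testing import assert_almost_equal
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

def f(x):
    return x * np.sin(x)

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T    # training inputs
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T    # held-out inputs
y = f(X).ravel()
kernels = [RBF(length_scale=1.0), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0)]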
Example 2: get_globals
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def get_globals():
    X = np.array([
        [0.00, 0.00],
        [0.99, 0.99],
        [0.00, 0.99],
        [0.99, 0.00],
        [0.50, 0.50],
        [0.25, 0.50],
        [0.50, 0.25],
        [0.75, 0.50],
        [0.50, 0.75],
    ])

    def get_y(X):
        return -(X[:, 0] - 0.3) ** 2 - 0.5 * (X[:, 1] - 0.6) ** 2 + 2
    y = get_y(X)

    mesh = np.dstack(
        np.meshgrid(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01))
    ).reshape(-1, 2)

    GP = GaussianProcessRegressor(
        kernel=Matern(),
        n_restarts_optimizer=25,
    )
    GP.fit(X, y)

    return {'x': X, 'y': y, 'gp': GP, 'mesh': mesh}
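A possible way to consume the returned dictionary, evaluating the fitted GP's posterior over the pre-built mesh (hypothetical usage, not part of the original):

env = get_globals()
mu, std = env['gp'].predict(env['mesh'], return_std=True)  # posterior mean/std on the 100x100 grid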
Example 3: bo_
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def bo_(x_obs, y_obs):
    kernel = kernels.Matern() + kernels.WhiteKernel()
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=16)
    gp.fit(x_obs, y_obs)

    # candidate grid: the 2-D cartesian product of 128 points per axis
    xs = list(repeat(np.atleast_2d(np.linspace(0, 10, 128)).T, 2))
    x = cartesian_product(*xs)

    # maximize the acquisition function to pick the next point to evaluate
    a = a_EI(gp, x_obs=x_obs, y_obs=y_obs)
    argmin_a_x = x[np.argmax(a(x))]

    # heavy evaluation of the true objective (cartesian_product, f2d and
    # plot_2d are helpers defined elsewhere in the source module)
    print("f({})".format(argmin_a_x))
    f_argmin_a_x = f2d(np.atleast_2d(argmin_a_x))

    plot_2d(gp, x_obs, y_obs, argmin_a_x, a, xs)
    plt.show()

    # recurse with the new observation appended (runs until interrupted)
    bo_(
        x_obs=np.vstack((x_obs, argmin_a_x)),
        y_obs=np.hstack((y_obs, f_argmin_a_x)),
    )
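bo_ assumes an acquisition factory a_EI defined elsewhere; one standard expected-improvement formulation that matches the call signature above (a sketch under that assumption — the original implementation may differ):

from scipy.stats import norm

def a_EI(gp, x_obs, y_obs, xi=0.01):
    """Return an expected-improvement acquisition a(x) for a fitted GP (maximization)."""
    y_best = np.max(y_obs)
    def a(x):
        mu, sigma = gp.predict(x, return_std=True)
        sigma = np.maximum(sigma, 1e-12)  # avoid division by zero
        z = (mu - y_best - xi) / sigma
        return (mu - y_best - xi) * norm.cdf(z) + sigma * norm.pdf(z)
    return a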
Example 4: test_acquisition_api
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_acquisition_api():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    y = rng.randn(10)
    gpr = GaussianProcessRegressor()
    gpr.fit(X, y)

    for method in [gaussian_ei, gaussian_lcb, gaussian_pi]:
        # each acquisition returns one value per sample ...
        assert_array_equal(method(X, gpr).shape, 10)
        # ... and rejects 1-D input
        assert_raises(ValueError, method, rng.rand(10), gpr)
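The acquisition functions exercised here look like scikit-optimize's; if so, the import the test assumes would be the following (an inference, not stated in the snippet):

from skopt.acquisition import gaussian_ei, gaussian_lcb, gaussian_pi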
Example 5: fit_GP
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def fit_GP(x_train):
    y_train = gaussian(x_train, mu, sig).ravel()

    # Instantiate a Gaussian Process model
    kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-2, 1e2))
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

    # Fit to the data using maximum-likelihood estimation of the parameters
    gp.fit(x_train, y_train)

    # Make the prediction on the meshed x-axis (ask for the std as well)
    y_pred, sigma = gp.predict(x, return_std=True)
    return y_train, y_pred, sigma
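fit_GP depends on the module-level names gaussian, mu, sig, and the prediction grid x; plausible stand-ins so the snippet runs on its own (assumed, not from the original source):

def gaussian(x, mu, sig):
    # unnormalized Gaussian bump used as the target function
    return np.exp(-0.5 * ((x - mu) / sig) ** 2)

mu, sig = 5.0, 1.0
x = np.atleast_2d(np.linspace(0, 10, 1000)).T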
Example 6: SmoothFunctionCreator
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
class SmoothFunctionCreator():
    def __init__(self, seed=42):
        self._gp = GaussianProcessRegressor()
        x_train = np.array([0.0, 2.0, 6.0, 10.0])[:, np.newaxis]
        source_train = np.array([0.0, 1.0, -1.0, 0.0])
        self._gp.fit(x_train, source_train)
        self._random_state = np.random.RandomState(seed)

    def sample(self, n_samples):
        x = np.linspace(0.0, 10.0, 100)[:, np.newaxis]
        source = self._gp.sample_y(x, n_samples, random_state=self._random_state)
        # target = smoothed derivative of the sampled curves, squashed by tanh
        target = gaussian_filter1d(source, 1, order=1, axis=0)
        target = np.tanh(10.0 * target)
        return source, target
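Usage is straightforward; sample returns matched (source, target) curve arrays of shape (100, n_samples):

creator = SmoothFunctionCreator(seed=0)
source, target = creator.sample(n_samples=5)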
Example 7: plot_gp
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def plot_gp(x_min, x_max, x, y, train_features, train_labels):
    fig = plt.figure(figsize=(16, 10))
    fig.suptitle('Gaussian Process and Utility Function After {} Steps'.format(len(train_features)),
                 fontsize=30)
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    axis = plt.subplot(gs[0])
    acq = plt.subplot(gs[1])

    gp = GaussianProcessRegressor(
        kernel=Matern(nu=2.5),
        n_restarts_optimizer=25,
    )
    gp.fit(train_features, train_labels)
    mu, sigma = gp.predict(x, return_std=True)

    axis.plot(x, y, linewidth=3, label='Target')
    axis.plot(train_features.flatten(), train_labels, 'D', markersize=8,
              label='Observations', color='r')
    axis.plot(x, mu, '--', color='k', label='Prediction')
    axis.fill(np.concatenate([x, x[::-1]]),
              np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]]),
              alpha=.6, fc='c', ec='None', label='95% confidence interval')
    axis.set_xlim((x_min, x_max))
    axis.set_ylim((None, None))
    axis.set_ylabel('f(x)', fontdict={'size': 20})
    axis.set_xlabel('x', fontdict={'size': 20})

    # upper-confidence-bound utility: mean + kappa * std
    bounds = np.asarray([[x_min, x_max]])  # kept from the original; unused here
    acquisition_function_kappa = 5
    mean, std = gp.predict(x, return_std=True)
    acquisition_function_values = mean + acquisition_function_kappa * std
    acq.plot(x, acquisition_function_values, label='Utility Function', color='purple')
    acq.plot(x[np.argmax(acquisition_function_values)], np.max(acquisition_function_values),
             '*', markersize=15, label='Next Best Guess',
             markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
    acq.set_xlim((x_min, x_max))
    acq.set_ylim((0, np.max(acquisition_function_values) + 0.5))
    acq.set_ylabel('Utility', fontdict={'size': 20})
    acq.set_xlabel('x', fontdict={'size': 20})

    axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
    acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
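The plotted utility is the upper-confidence-bound acquisition, mean(x) + kappa * std(x) with kappa = 5; its argmax (the starred point) is the suggested next sample location.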
Example 8: test_K_inv_reset
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_K_inv_reset(kernel):
    y2 = f(X2).ravel()

    # Test that self._K_inv is reset after a new fit
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    assert hasattr(gpr, '_K_inv')
    assert gpr._K_inv is None
    gpr.predict(X, return_std=True)
    assert gpr._K_inv is not None
    gpr.fit(X2, y2)
    assert gpr._K_inv is None
    gpr.predict(X2, return_std=True)

    gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
    gpr2.predict(X2, return_std=True)
    # the value of K_inv should be independent of the first fit
    assert_array_equal(gpr._K_inv, gpr2._K_inv)
Example 9: test_GP_brownian_motion
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_GP_brownian_motion(self):
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

    # time grid (y and yerr are assumed to be defined for this grid elsewhere
    # in the test class)
    t = np.linspace(0, 10, 100)

    # Instantiate a Gaussian Process model with a Brownian-motion covariance.
    # NOTE: scikit-learn expects a Kernel instance here, so this plain callable
    # will fail inside fit(); see the sketch after this example.
    # kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
    kernel = lambda x, y: 1. * min(x, y)
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

    # Fit to the data using maximum-likelihood estimation of the parameters
    X = np.atleast_2d(t).T
    gp.fit(X, y)

    # Make the prediction on the meshed x-axis (ask for the std as well);
    # predict expects 2-D input, so use X rather than the 1-D t
    y_pred, sigma = gp.predict(X, return_std=True)

    fig = plt.figure()
    ax = fig.add_axes((0.1, 0.3, 0.8, 0.65))
    ax.invert_yaxis()
    ax.plot(t, y, color='blue', label='L bol', lw=2.5)
    ax.errorbar(t, y, yerr=yerr, fmt='o', color='blue', label='%s obs.')
    ax.plot(t, y_pred, '-', color='gray')
    ax.fill(np.concatenate([t, t[::-1]]),
            np.concatenate([y_pred - 1.9600 * sigma,
                            (y_pred + 1.9600 * sigma)[::-1]]),
            alpha=.5, fc='b', ec='None', label='95% confidence interval')
    plt.show()
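As noted in the comments, current scikit-learn rejects a plain callable as kernel=...; the Brownian-motion covariance k(s, t) = min(s, t) would have to be wrapped in a Kernel subclass. A minimal sketch of such a wrapper (hypothetical, not part of the original test; it has no tunable hyperparameters, so hyperparameter optimization is effectively a no-op):

import numpy as np
from sklearn.gaussian_process.kernels import Kernel

class BrownianKernel(Kernel):
    """k(s, t) = min(s, t) for 1-D nonnegative inputs."""
    def __init__(self):
        pass  # no hyperparameters

    def __call__(self, X, Y=None, eval_gradient=False):
        Y = X if Y is None else Y
        K = np.minimum(X, Y.T)  # pairwise min via broadcasting: (n, 1) vs (1, m)
        if eval_gradient:
            return K, np.empty((X.shape[0], X.shape[0], 0))  # no gradients to report
        return K

    def diag(self, X):
        return X.ravel()  # min(x, x) = x

    def is_stationary(self):
        return False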
Example 10: plot_gaussian
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def plot_gaussian(data, col):
    '''
    Plots the Gaussian process regression with a characteristic length scale
    of 10 years. Essentially this highlights the 'slow trend' in the data.

    Parameters
    ----------
    data: dataframe
        pandas dataframe containing 'date', 'linMean' which is the average
        runtime and 'linSD' which is the standard deviation.
    col: string
        the color in which to plot the data
    '''
    # extract the results from the dataframe
    Year = np.array(data[u'date'].tolist())
    Mean = np.array(data[u'linMean'].tolist())
    SD = np.array(data[u'linSD'].tolist())

    # initialize the gaussian process. Note that the process is calculated
    # with a length scale of 10 years to give the 'slow trend' in the results.
    length_scale = 10.
    kernel = 1. * RBF(length_scale)
    # per-point noise variance is passed via `alpha` (the original used the
    # older `sigma_squared_n` name, which current scikit-learn rejects)
    gp = GaussianProcessRegressor(kernel=kernel, alpha=SD ** 2,
                                  normalize_y=True)

    # now fit the data and get the predicted mean and standard deviation.
    # Note: GaussianProcessRegressor expects 2-D inputs, so the data are
    # converted to 2-D and then converted back for plotting
    gp.fit(np.atleast_2d(Year).T, np.atleast_2d(Mean).T)
    Year_array = np.atleast_2d(np.linspace(min(Year) - 2, max(Year) + 2, 100)).T
    Mean_prediction, SD_prediction = gp.predict(Year_array, return_std=True)
    Year_array = Year_array.ravel()
    Mean_prediction = Mean_prediction.ravel()

    # plot the predicted best fit
    plt.plot(Year_array, Mean_prediction, col, alpha=1)
    # plot the 95% confidence interval
    plt.fill_between(Year_array, Mean_prediction - 1.9600 * SD_prediction,
                     y2=Mean_prediction + 1.9600 * SD_prediction,
                     alpha=0.5, color=col)
    plt.draw()
Example 11: test_duplicate_input
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_duplicate_input():
    """Test that GPR can handle two different output values for the same input."""
    for kernel in kernels:
        gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
        gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)

        X_ = np.vstack((X, X[0]))
        y_ = np.hstack((y, y[0] + 1))
        gpr_equal_inputs.fit(X_, y_)

        X_ = np.vstack((X, X[0] + 1e-15))
        y_ = np.hstack((y, y[0] + 1))
        gpr_similar_inputs.fit(X_, y_)

        X_test = np.linspace(0, 10, 100)[:, None]
        y_pred_equal, y_std_equal = gpr_equal_inputs.predict(X_test, return_std=True)
        y_pred_similar, y_std_similar = gpr_similar_inputs.predict(X_test, return_std=True)
        assert_almost_equal(y_pred_equal, y_pred_similar)
        assert_almost_equal(y_std_equal, y_std_similar)
Example 12: test_custom_optimizer
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def test_custom_optimizer():
    """Test that GPR can use externally defined optimizers."""
    # Define a dummy optimizer that simply tests 50 random hyperparameters
    def optimizer(obj_func, initial_theta, bounds):
        rng = np.random.RandomState(0)
        theta_opt, func_min = initial_theta, obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
                                              np.minimum(1, bounds[:, 1])))
            f = obj_func(theta, eval_gradient=False)
            if f < func_min:
                theta_opt, func_min = theta, f
        return theta_opt, func_min

    for kernel in kernels:
        if kernel == fixed_kernel:
            continue
        gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
        gpr.fit(X, y)
        # Check that the optimizer improved the marginal likelihood
        # (kernel_ carries the optimized hyperparameters, kernel the initial ones)
        assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
                       gpr.log_marginal_likelihood(gpr.kernel.theta))
Example 13: GP
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
class GP(BaseTuner):
    def __init__(self, tunables, gridding=0, r_minimum=2):
        """
        Extra args:
            r_minimum: the minimum number of past results this selector needs
                in order to use a Gaussian process for prediction. If not
                enough results are present during a fit(), subsequent calls
                to propose() will revert to uniform selection.
        """
        super(GP, self).__init__(tunables, gridding=gridding)
        self.r_minimum = r_minimum

    def fit(self, X, y):
        """Use X and y to train a Gaussian process."""
        super(GP, self).fit(X, y)

        # skip training the process if there aren't enough samples
        if X.shape[0] < self.r_minimum:
            return

        self.gp = GaussianProcessRegressor(normalize_y=True)
        self.gp.fit(X, y)

    def predict(self, X):
        if self.X.shape[0] < self.r_minimum:
            # we probably don't have enough observations yet
            logger.warn('GP: not enough data, falling back to uniform sampler')
            return Uniform(self.tunables).predict(X)

        y, stdev = self.gp.predict(X, return_std=True)
        return np.array(list(zip(y, stdev)))

    def _acquire(self, predictions):
        """
        Predictions from the GP will be in the form (prediction, error).
        The default acquisition function returns the index with the highest
        predicted value, not factoring in error.
        """
        return np.argmax(predictions[:, 0])
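For reference, the predict/_acquire contract: predict returns an (n, 2) array of (mean, std) rows, and _acquire picks the row with the best predicted mean, ignoring the error column. A toy illustration (standalone, values made up):

predictions = np.array([[0.2, 0.05],
                        [0.9, 0.30],
                        [0.6, 0.10]])  # (mean, std) per candidate
best = np.argmax(predictions[:, 0])   # -> 1; the error column is ignored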
Example 14: setTrainMC
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def setTrainMC(self, MCHisto):
    """Train a GP on a histogram to get hyperparameters.

    Use a high-stats MC sample to optimize the hyperparameters, then
    return a kernel object to be used in the data fit.
    """
    print("===== Optimizing hyperparameters on the training sample.")
    GPh = GPHisto(MCHisto)
    X_t = GPh.getXArr()
    Y_t = GPh.getYArr()
    dy_t = GPh.getErrArr()

    gp = GaussianProcessRegressor(kernel=self.kernel,
                                  alpha=dy_t ** 2,
                                  n_restarts_optimizer=10)
    # Fit for the hyperparameters.
    gp.fit(X_t, Y_t)
    print("Optimized hyperparameters:")
    print(gp.kernel_)

    # return a kernel object with optimized hyperparameters
    return gp.kernel_
Example 15: integrated_sigma
# Required import: from sklearn.gaussian_process import GaussianProcessRegressor [as alias]
# Or: from sklearn.gaussian_process.GaussianProcessRegressor import fit [as alias]
def integrated_sigma(alpha, n_samples, n_restarts_optimizer=16, f=f):
    print("integrated_sigma(n_samples={n_samples}, alpha={alpha})".format(
        n_samples=n_samples,
        alpha=alpha,
    ))
    X = np.atleast_2d(
        np.linspace(1, 9, n_samples)
    ).T
    y = f(X).ravel()
    x = np.atleast_2d(np.linspace(0, 10, 16 * 1024)).T

    kernel = kernels.Matern() + (kernels.WhiteKernel(noise_level=alpha)
                                 if alpha is not None else 0.0)
    gp = GaussianProcessRegressor(
        kernel=kernel,
        n_restarts_optimizer=n_restarts_optimizer,
    )
    gp.fit(X, y)
    y_pred, sigma = gp.predict(x, return_std=True)

    # integrate the posterior std over [0, 10] with Simpson's rule
    return simps(
        x=x.ravel(),
        y=sigma,
    )
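integrated_sigma assumes a module-level objective f (and simps from scipy.integrate); a plausible stand-in and a call, purely for illustration:

from scipy.integrate import simps  # scipy.integrate.simpson in newer SciPy

def f(x):
    return x * np.sin(x)

area = integrated_sigma(alpha=1e-2, n_samples=32)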