本文整理汇总了Python中sklearn.gaussian_process.GaussianProcessClassifier.fit方法的典型用法代码示例。如果您正苦于以下问题:Python GaussianProcessClassifier.fit方法的具体用法?Python GaussianProcessClassifier.fit怎么用?Python GaussianProcessClassifier.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.gaussian_process.GaussianProcessClassifier
的用法示例。
在下文中一共展示了GaussianProcessClassifier.fit方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_classifier_gp
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def build_classifier_gp(data, labels, **kwargs):
    """Fit a GP classifier with a linear kernel on (data, labels).

    Returns the fitted classifier together with its predicted
    probabilities for the positive class on the training data.
    """
    # Linear kernel: (DotProduct * Constant) + Constant, with the
    # dot-product offset pinned at zero ('fixed' bounds).
    dot = DotProduct(sigma_0=0, sigma_0_bounds='fixed')
    linear_kernel = Sum(k1=Product(k1=dot, k2=ConstantKernel()),
                        k2=ConstantKernel())
    classifier = GaussianProcessClassifier(kernel=linear_kernel)
    classifier.fit(data, labels)
    # Boolean mask selecting the column of the class with the largest label.
    positive_mask = classifier.classes_ == labels.max()
    positive_proba = classifier.predict_proba(data)[:, positive_mask]
    return classifier, positive_proba
示例2: test_multi_class
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def test_multi_class(kernel):
    """GPC on a multi-class problem: each probability row sums to one and
    the argmax of predict_proba agrees with predict()."""
    classifier = GaussianProcessClassifier(kernel=kernel)
    classifier.fit(X, y_mc)
    probabilities = classifier.predict_proba(X2)
    # Rows of predict_proba must form valid probability distributions.
    assert_almost_equal(probabilities.sum(1), 1)
    predictions = classifier.predict(X2)
    # Hard predictions must pick the most probable class.
    assert_array_equal(np.argmax(probabilities, 1), predictions)
示例3: test_multi_class_n_jobs
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def test_multi_class_n_jobs(kernel):
    """Multi-class GPC must give identical probabilities with n_jobs > 1."""
    serial = GaussianProcessClassifier(kernel=kernel)
    serial.fit(X, y_mc)
    parallel = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
    parallel.fit(X, y_mc)
    prob_serial = serial.predict_proba(X2)
    prob_parallel = parallel.predict_proba(X2)
    assert_almost_equal(prob_serial, prob_parallel)
示例4: test_custom_optimizer
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def test_custom_optimizer(kernel):
    """GPC must accept an externally supplied optimizer callable."""
    def random_search(obj_func, initial_theta, bounds):
        # Dummy optimizer: evaluate 50 random hyperparameter vectors
        # (seeded, so the search is deterministic) and keep the best one
        # seen, starting from the initial point.
        rng = np.random.RandomState(0)
        best_theta = initial_theta
        best_value = obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            candidate = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]),
                            np.minimum(1, bounds[:, 1])))
            value = obj_func(candidate, eval_gradient=False)
            if value < best_value:
                best_theta = candidate
                best_value = value
        return best_theta, best_value

    gpc = GaussianProcessClassifier(kernel=kernel, optimizer=random_search)
    gpc.fit(X, y_mc)
    # The optimizer must not make the log-marginal-likelihood worse than
    # the one of the initial kernel hyperparameters.
    assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
                   gpc.log_marginal_likelihood(kernel.theta))
示例5: trainModel
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def trainModel(subjectid):
    """Train a GP classifier on a subject's simulation data.

    Returns the fitted classifier plus the cleaned rivalry (test)
    features and labels for that subject.
    """
    # Training CSV produced by the matlab pipeline.
    train_raw = np.genfromtxt('csvdata/' + subjectid + '_sim.csv',
                              delimiter=',',
                              missing_values=['NaN', 'nan'],
                              filling_values=None)
    trainx, trainy = cleandata(train_raw, downsamplefactor=20)
    # Fit a GP classifier with the default RBF kernel.
    gp = GaussianProcessClassifier(kernel=kernels.RBF())
    gp.fit(trainx, trainy)
    # Drop references to the training arrays to preserve memory.
    trainx = trainy = None
    # Rivalry CSV holding the held-out test data.
    test_raw = np.genfromtxt('csvdata/' + subjectid + '_rival.csv',
                             delimiter=',',
                             missing_values=['NaN', 'nan'],
                             filling_values=None)
    testx, testy = cleandata(test_raw, downsamplefactor=4)
    return gp, testx, testy
示例6: C
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
# Training inputs: eight hand-picked 2-D sample points.
X = np.array([[-4.61611719, -6.00099547],
              [4.10469096, 5.32782448],
              [0.00000000, -0.50000000],
              [-6.17289014, -4.6984743],
              [1.3109306, -6.93271427],
              [-5.03823144, 3.10584743],
              [-2.87600388, 6.74310541],
              [5.21301203, 4.26386883]])
# Observations: binary labels from the sign of g at each sample point.
# NOTE(review): g and lim are defined elsewhere in this example snippet.
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
# Kernel: scaled squared dot-product (C presumably aliases ConstantKernel).
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50  # grid resolution per axis
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
                     np.linspace(- lim, lim, res))
# Flatten the mesh into an (res*res, 2) array of evaluation points.
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]  # probability of the positive class
# Reshape back to the 2-D grid for contour/image plotting.
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
示例7: trainPredict
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def trainPredict(subjectid, makeplot=False):
    """Train a GP classifier on a subject's simulation data, predict the
    rivalry (test) data, and summarise the predictions per percept.

    Parameters
    ----------
    subjectid : str
        Identifier used to locate 'csvdata/<subjectid>_sim.csv'
        (training) and 'csvdata/<subjectid>_rival.csv' (test).
    makeplot : bool, optional
        If True, draw a summary plot of the predictions.

    Returns
    -------
    tuple of dict
        (meanprediction, predictiondev, predictionaccuracy,
         predictioncourse, nextcourse, afterdominant) — each maps a
        percept-derived key to a summary statistic of the predicted
        positive-class probability or accuracy.
    """
    print("testing participant " + subjectid)
    # Load training data from the file matlab generates.
    traindata = np.genfromtxt('csvdata/' + subjectid +
                              '_sim.csv', delimiter=',',
                              missing_values=['NaN', 'nan'],
                              filling_values=None)
    # Clean + downsample this data.
    trainx, trainy = cleandata(traindata, downsamplefactor=20)
    # Train a Gaussian Process with the default RBF kernel.
    anisokern = kernels.RBF()
    gp = GaussianProcessClassifier(kernel=anisokern)
    gp.fit(trainx, trainy)
    trainx = trainy = None  # discard training data to preserve memory
    # Load test data.
    testdata = np.genfromtxt('csvdata/' + subjectid +
                             '_rival.csv', delimiter=',',
                             missing_values=['NaN', 'nan'],
                             filling_values=None)
    testx, testy = cleandata(testdata, downsamplefactor=4)
    testdata = None  # clear from memory
    # Work out percentage in percept for each data point.
    percentages, nextpercept = assign_percentage(testy)
    # Get a prediction for all points in the test data.
    predicty = gp.predict(testx)
    proby = gp.predict_proba(testx)
    if makeplot:
        # BUG FIX: the original passed the undefined name `participant`,
        # which raised NameError whenever makeplot=True.
        summaryplot(subjectid, testx, testy, predicty, proby, gp)
    # Summarise prediction by reported percept.
    # (dict.items() replaces Py2-only dict.iteritems(); results identical.)
    meanprediction = {'mean' + percept:
                      proby[testy == value, 1].mean()
                      for percept, value in perceptindices.items()}
    predictiondev = {'stdev' + percept:
                     proby[testy == value, 1].std()
                     for percept, value in perceptindices.items()}
    predictionaccuracy = {'acc' + percept:
                          (predicty[testy == value] ==
                           testy[testy == value]).mean()
                          for percept, value in perceptindices.items()}
    # Summarise prediction by percentage in percept (10% bins).
    predictioncourse = {'timecourse' + percept + str(cutoff):
                        proby[(testy == value) &
                              (percentages < cutoff) &
                              (percentages > cutoff - 0.1), 1].mean()
                        for percept, value in perceptindices.items()
                        for cutoff in np.linspace(0.1, 1, 10)}
    # Summarise mixed-percept time courses by the next percept.
    nextcourse = {'nextcourse' + percept + str(cutoff):
                  proby[(testy == 0) &
                        (percentages < cutoff) &
                        (percentages > cutoff - 0.1) &
                        (nextpercept == perceptindices[percept]), 1].mean()
                  for percept in ['highfreq', 'lowfreq']
                  for cutoff in np.linspace(0.1, 1, 10)}
    # Dominant-percept time courses split by which percept follows.
    afterdominant = {'after' + percept + "_" + after + "_" + str(cutoff):
                     proby[(testy == perceptindices[percept]) &
                           (percentages < cutoff) &
                           (percentages > cutoff - 0.1) &
                           (nextpercept == perceptindices[after]), 1].mean()
                     for percept, after in [('highfreq', 'mixed'),
                                            ('highfreq', 'lowfreq'),
                                            ('lowfreq', 'mixed'),
                                            ('lowfreq', 'highfreq')]
                     for cutoff in np.linspace(0.1, 1, 10)}
    # Only return the summarised data.
    return meanprediction, predictiondev, predictionaccuracy, \
        predictioncourse, nextcourse, afterdominant
示例8: plot
# 需要导入模块: from sklearn.gaussian_process import GaussianProcessClassifier [as 别名]
# 或者: from sklearn.gaussian_process.GaussianProcessClassifier import fit [as 别名]
def plot(df, options):
UNIQ_GROUPS = df.group.unique()
UNIQ_GROUPS.sort()
sns.set_style("white")
grppal = sns.color_palette("Set2", len(UNIQ_GROUPS))
print '# UNIQ GROUPS', UNIQ_GROUPS
cent_stats = df.groupby(
['position', 'group', 'side']).apply(stats_per_group)
cent_stats.reset_index(inplace=True)
import time
from sklearn import preprocessing
from sklearn.gaussian_process import GaussianProcessRegressor, GaussianProcessClassifier
from sklearn.gaussian_process.kernels import Matern, WhiteKernel, ExpSineSquared, ConstantKernel, RBF
ctlDF = cent_stats[ cent_stats['group'] == 0 ]
TNRightDF = cent_stats[ cent_stats['group'] != 0]
TNRightDF = TNRightDF[TNRightDF['side'] == 'right']
dataDf = pd.concat([ctlDF, TNRightDF], ignore_index=True)
print dataDf
yDf = dataDf['group'] == 0
yDf = yDf.astype(int)
y = yDf.values
print y
print y.shape
XDf = dataDf[['position', 'values']]
X = XDf.values
X = preprocessing.scale(X)
print X
print X.shape
# kernel = ConstantKernel() + Matern(length_scale=mean, nu=3 / 2) + \
# WhiteKernel(noise_level=1e-10)
kernel = 1**2 * Matern(length_scale=1, nu=1.5) + \
WhiteKernel(noise_level=0.1)
figure = plt.figure(figsize=(10, 6))
stime = time.time()
gp = GaussianProcessClassifier(kernel)
gp.fit(X, y)
print gp.kernel_
print gp.log_marginal_likelihood()
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# create a mesh to plot in
h = 0.1
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
plt.figure(figsize=(10, 5))
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, m_max]x[y_min, y_max].
Z = gp.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:,1]
print Z
print Z.shape
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1]))
print Z.shape
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g"])[y])
plt.xlabel('position')
plt.ylabel('normalized val')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
("TN vs. Control", gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
if options.title:
plt.suptitle(options.title)
if options.output:
plt.savefig(options.output, dpi=150)
#.........这里部分代码省略.........