本文整理汇总了Python中sklearn.linear_model.PassiveAggressiveRegressor.predict方法的典型用法代码示例。如果您正苦于以下问题:Python PassiveAggressiveRegressor.predict方法的具体用法?Python PassiveAggressiveRegressor.predict怎么用?Python PassiveAggressiveRegressor.predict使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.linear_model.PassiveAggressiveRegressor
的用法示例。
在下文中一共展示了PassiveAggressiveRegressor.predict方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fancy_text_model
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def fancy_text_model(x_train, y_train, x_test, x_valid, cache_name, use_cache=False):
    """Fit a PassiveAggressiveRegressor on text features and predict.

    Returns a tuple ``(test_pred, valid_pred)``.  When ``use_cache`` is True,
    the predictions are loaded from the pickle file at ``cache_name`` instead
    of refitting; otherwise the model is fit and the fresh predictions are
    pickled to ``cache_name`` for later reuse.
    """
    if use_cache:
        # Pickle files must be opened in binary mode: text mode fails under
        # Python 3 and can corrupt the stream on platforms that translate
        # newlines.  `with` also guarantees the handle is closed (the
        # original leaked it on this early-return path).
        with open(cache_name, 'rb') as fhand:
            data_dict = pickle.load(fhand)
        return data_dict['test_pred'], data_dict['valid_pred']
    np.random.seed(seed=123)
    # NOTE(review): `n_iter` was renamed `max_iter` in newer scikit-learn;
    # kept as-is for compatibility with the version this file targets.
    model = PassiveAggressiveRegressor(n_iter=100, C=1, shuffle=True, random_state=123)
    model.fit(x_train, y_train)
    test_pred = model.predict(x_test)
    valid_pred = model.predict(x_valid)
    data_dict = {'test_pred': test_pred, 'valid_pred': valid_pred}
    with open(cache_name, 'wb') as fhand:
        pickle.dump(data_dict, fhand)
    return test_pred, valid_pred
示例2: test_regressor_partial_fit
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def test_regressor_partial_fit():
    # Binarize the targets: everything that is not 1 becomes -1.
    targets = y.copy()
    targets[y != 1] = -1
    for features in (X, X_csr):
        model = PassiveAggressiveRegressor(C=1.0,
                                           fit_intercept=True,
                                           random_state=0)
        # 50 incremental passes over the full data set.
        for _ in range(50):
            model.partial_fit(features, targets)
        mse = np.mean((model.predict(features) - targets) ** 2)
        assert_less(mse, 1.7)
示例3: test_regressor_mse
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def test_regressor_mse():
    # Binarize the targets: everything that is not 1 becomes -1.
    targets = y.copy()
    targets[y != 1] = -1
    for features in (X, X_csr):
        for intercept in (True, False):
            model = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                               fit_intercept=intercept,
                                               random_state=0)
            model.fit(features, targets)
            mse = np.mean((model.predict(features) - targets) ** 2)
            assert_less(mse, 1.7)
示例4: test_regressor_partial_fit
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def test_regressor_partial_fit():
    # Binarize the targets: everything that is not 1 becomes -1.
    targets = y.copy()
    targets[y != 1] = -1
    for features in (X, X_csr):
        for use_averaging in (False, True):
            model = PassiveAggressiveRegressor(
                C=1.0, fit_intercept=True, random_state=0,
                average=use_averaging, max_iter=100)
            # 50 incremental passes over the full data set.
            for _ in range(50):
                model.partial_fit(features, targets)
            mse = np.mean((model.predict(features) - targets) ** 2)
            assert_less(mse, 1.7)
            if use_averaging:
                # Averaged fitting must expose both the averaged weights
                # and the last (standard) weights.
                for attr in ('average_coef_', 'average_intercept_',
                             'standard_intercept_', 'standard_coef_'):
                    assert hasattr(model, attr)
示例5: test_regressor_mse
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def test_regressor_mse():
    """A fitted PA regressor reaches MSE below 1.7 on binarized targets,
    for dense/sparse input, with/without intercept, with/without averaging.
    """
    y_bin = y.copy()
    y_bin[y != 1] = -1
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            for average in (False, True):
                reg = PassiveAggressiveRegressor(
                    C=1.0, fit_intercept=fit_intercept,
                    random_state=0, average=average, max_iter=5)
                reg.fit(data, y_bin)
                pred = reg.predict(data)
                assert_less(np.mean((pred - y_bin) ** 2), 1.7)
                if average:
                    # Plain `assert`: sklearn's assert_true helper is
                    # deprecated/removed; this also matches the sibling
                    # partial_fit test in this file.
                    assert hasattr(reg, 'average_coef_')
                    assert hasattr(reg, 'average_intercept_')
                    assert hasattr(reg, 'standard_intercept_')
                    assert hasattr(reg, 'standard_coef_')
示例6: PassiveAggressiveRegressor
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
# Vectorize the raw question/topic text and scale the follower counts, for
# both the training data and the test data (test names are prefixed "t").
quesparse = quevectorizer.fit_transform(question)
topsparse = topvectorizer.fit_transform(topics)
cfscaled = cfscaler.transform(contextfollowers)
tfscaled = tfscaler.transform(topicsfollowers)
tquesparse = quevectorizer.transform(tquestion)
ttopsparse = topvectorizer.transform(ttopics)
tcfscaled = cfscaler.transform(tcontextfollowers)
ttfscaled = tfscaler.transform(ttopicsfollowers)
# Fit a passive-aggressive regressor on the topic features only and predict
# on the test-set topic features.
par = PassiveAggressiveRegressor()
par.fit(topsparse,y)
pred = par.predict(ttopsparse)
# Clamp negative predictions to zero (the target is a non-negative count).
pred[pred<0] = 0
# Histogram the training targets, raw and in log10 (zeros mapped to 1 so the
# log is defined).  `temp` just swallows matplotlib's return values.
temp = pl.figure("train y")
temp = pl.subplot(2,1,1)
temp = pl.hist(y,1000)
temp = pl.subplot(2,1,2)
yy = y.copy()
yy[yy==0] = 1
temp = pl.hist(np.log10(yy),1000)
# Histogram the test-set predictions in a second figure.
temp = pl.figure("test y")
temp = pl.subplot(4,1,1)
temp = pl.hist(pred,1000)
temp = pl.subplot(4,1,2)
示例7: ShuffleSplit
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
# Assemble prediction variables: restrict to the top-100 important features.
X_train = X_train_pre.loc[:, important_features_top_100]
X_test = X_test_pre.loc[:, important_features_top_100]
for gene in prioritized_genes:
    y_train = train_ess.ix[:, gene]
    y_preds_test = []
    y_preds_scores = []
    # Training: 5 random shuffle splits; keep one Spearman score and one
    # test-set prediction per split.
    cv = ShuffleSplit(len(y_train), n_iter=5)
    for train_i, test_i in cv:
        clf = PassiveAggressiveRegressor(epsilon=0.01, n_iter=7).fit(X_train.ix[train_i, :], y_train[train_i])
        y_preds_scores.append(spearm_cor_func(clf.predict(X_train.ix[test_i, :]), y_train[test_i]))
        y_preds_test.append(clf.predict(X_test))
    y_preds_scores = Series(y_preds_scores)
    y_preds_test = DataFrame(y_preds_test)
    # Predict: average the per-split predictions, dropping splits whose
    # correlation score came back NaN.
    y_pred = np.mean(y_preds_test[y_preds_scores.notnull()], axis=0).values
    print gene, X_train.shape
    # Store results
    predictions.ix[gene] = y_pred
filename_gct = save_gct_data(predictions, submission_filename_prefix)
print '[DONE]: Saved to file ' + filename_gct
示例8: VarianceThreshold
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
# Filter by coefficient variation: drop features whose variance is below the
# previously selected threshold `best_var`.
var_thres = VarianceThreshold(best_var).fit(X_train_pre)
X_train_pre = var_thres.transform(X_train_pre)
X_test_pre = var_thres.transform(X_test_pre)
for gene in genes:
    # Assemble prediction variables
    X_train = X_train_pre
    y_train = train_ess.ix[:, gene]
    X_test = X_test_pre
    # Feature selection: keep the best_k features most associated with this
    # gene's target values under the univariate F-test.
    fs = SelectKBest(f_regression, k=best_k).fit(X_train, y_train)
    X_train = fs.transform(X_train)
    X_test = fs.transform(X_test)
    # Estimation
    clf = PassiveAggressiveRegressor(epsilon=best_epsilon, n_iter=best_n_iter).fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Store results
    predictions.ix[gene] = y_pred
    print gene
filename = save_gct_data(predictions, submission_filename_prefix)
print '[DONE]: Saved to file ' + filename
submit_solution(filename, filename.split('/')[1], ev_code_sc1)
print '[SUBMITED]'
示例9: ElasticNet
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
# Fit several regressors on (x, y) and score the held-out inputs xt.  The
# commented-out lines are the 1-D variants that reshape with np.newaxis.
# Elastic Net
print 'elastic net'
enr = ElasticNet()
#enr.fit(x[:, np.newaxis], y)
#enr_sts_scores = enr.predict(xt[:, np.newaxis])
enr.fit(x, y)
enr_sts_scores = enr.predict(xt)
# Passive Aggressive Regression
print 'passive aggressive'
par = PassiveAggressiveRegressor()
par.fit(x, y)
par_sts_scores = par.predict(xt)
#par.fit(x[:, np.newaxis], y)
#par_sts_scores = par.predict(xt[:, np.newaxis])
# RANSAC Regression
print 'ransac'
ransac = RANSACRegressor()
#ransac.fit(x[:, np.newaxis], y)
#ransac_sts_scores = ransac.predict(xt[:, np.newaxis])
ransac.fit(x, y)
ransac_sts_scores = ransac.predict(xt)
# Logistic Regression (fit/predict presumably follow beyond this excerpt).
print 'logistic'
lgr = LogisticRegression()
示例10: main
# 需要导入模块: from sklearn.linear_model import PassiveAggressiveRegressor [as 别名]
# 或者: from sklearn.linear_model.PassiveAggressiveRegressor import predict [as 别名]
def main():
X, y, coef = make_regression(1000, 200, 10, 1, noise=0.05, coef=True,
random_state=42)
# X = np.column_stack((X, np.ones(X.shape[0])))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=42)
# sca = StandardScaler()
# sca.fit(X_train)
# X_train = sca.transform(X_train)
# X_test = sca.transform(X_test)
# print X.shape
# print y.shape
# print coef.shape
param_grid = {
"C": [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10,
100, 1000],
"epsilon": [0.0001, 0.001, 0.01, 0.1]}
param_grid_kern = {
"C": [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10,
100, 1000],
"epsilon": [0.0001, 0.001, 0.01, 0.1],
"gamma": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]}
# "loss": ["pa", "pai", "paii"]}}
my_pa = PARegressor(loss="paii", C=1, epsilon=0.001, n_iter=1,
fit_intercept=False)
#
# search = GridSearchCV(my_pa, param_grid,
# scoring='mean_absolute_error', n_jobs=8, iid=True, refit=True, cv=5,
# verbose=1)
# search.fit(X_train, y_train)
# print search.best_params_
my_pa.fit(X_train, y_train)
print my_pa.coef_
# y_preds = search.predict(X_test)
y_preds = my_pa.predict(X_test)
mae_my_pa = mean_absolute_error(y_test, y_preds)
print "My PA MAE = %2.4f" % mae_my_pa
my_kpa_linear = KernelPARegressor(kernel="linear", loss="paii", C=1, epsilon=0.001, n_iter=1, fit_intercept=False)
my_kpa_linear.fit(X_train, y_train)
print "alphas", len(my_kpa_linear.alphas_), my_kpa_linear.alphas_
y_preds = my_kpa_linear.predict(X_test)
mae_kpa_linear = mean_absolute_error(y_test, y_preds)
print "My KPA linear MAE = %2.4f" % mae_kpa_linear
my_kpa_rbf = KernelPARegressor(kernel="rbf", loss="paii", gamma=0.001, C=1, epsilon=0.001, n_iter=1, fit_intercept=False)
# search = GridSearchCV(my_kpa_rbf, param_grid_kern,
# scoring='mean_absolute_error', n_jobs=8, iid=True, refit=True, cv=5,
# verbose=1)
# search.fit(X_train, y_train)
my_kpa_rbf.fit(X_train, y_train)
print "alphas", len(my_kpa_rbf.alphas_), my_kpa_rbf.alphas_
print "support", len(my_kpa_rbf.support_)
# print "alphas", len(search.best_estimator_.alphas_) # , my_kpa_rbf.alphas_
# print "support", len(search.best_estimator_.support_)
# print search.best_params_
y_preds = my_kpa_rbf.predict(X_test)
# y_preds = search.predict(X_test)
mae_my_kpa = mean_absolute_error(y_test, y_preds)
print "My Kernel PA MAE = %2.4f" % mae_my_kpa
# print search.best_estimator_
# print np.corrcoef(search.best_estimator_.coef_, coef)
# param_grid = {
# "C": [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10,
# 100, 1000, 10000],
# "epsilon": [0.0001, 0.001, 0.01, 0.1],
# # "loss": ["epsilon_insensitive", "squared_epsilon_insensitive"]}
# "loss": ["squared_epsilon_insensitive"]}
# search = GridSearchCV(PassiveAggressiveRegressor(fit_intercept=True),
# param_grid, scoring='mean_absolute_error', n_jobs=8, iid=True,
# refit=True, cv=5, verbose=1)
# search.fit(X_train, y_train)
sk_pa = PassiveAggressiveRegressor(loss="squared_epsilon_insensitive", C=1,
epsilon=0.001, n_iter=1,
fit_intercept=False,
warm_start=True)
for i in xrange(X_train.shape[0]):
# for x_i, y_i in zip(X_train, y_train):
x = np.array(X_train[i], ndmin=2)
y = np.array(y_train[i], ndmin=1)
# print x.shape
# print y
sk_pa.partial_fit(x, y)
#.........这里部分代码省略.........