This article collects typical usage examples of the PassiveAggressiveRegressor class from Python's sklearn.linear_model module. If you are unsure what PassiveAggressiveRegressor is for, or how to use it in practice, the curated class examples below should help.
The following presents 14 code examples of the PassiveAggressiveRegressor class, sorted by popularity by default.
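Before the collected examples, here is a minimal, self-contained sketch of the basic fit/predict workflow. This is my own illustration rather than one of the collected examples; the synthetic data and hyperparameter values are arbitrary, and max_iter/tol replace the older n_iter argument that appears in some of the snippets below.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import PassiveAggressiveRegressor
from sklearn.model_selection import train_test_split

# Synthetic regression problem; any (n_samples, n_features) data works.
X, y = make_regression(n_samples=500, n_features=20, noise=0.1, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# C bounds the size of each online update; max_iter and tol control stopping.
reg = PassiveAggressiveRegressor(C=1.0, max_iter=1000, tol=1e-3, random_state=0)
reg.fit(X_train, y_train)

pred = reg.predict(X_test)
print("test MSE: %.3f" % np.mean((pred - y_test) ** 2))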
Example 1: test_regressor_mse
def test_regressor_mse():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for fit_intercept in (True, False):
            reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
                                             fit_intercept=fit_intercept,
                                             random_state=0)
            reg.fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)
Example 2: test_regressor_partial_fit
def test_regressor_partial_fit():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        reg = PassiveAggressiveRegressor(C=1.0,
                                         fit_intercept=True,
                                         random_state=0)
        for t in range(50):
            reg.partial_fit(data, y_bin)
        pred = reg.predict(data)
        assert_less(np.mean((pred - y_bin) ** 2), 1.7)
Example 3: test_regressor_correctness
def test_regressor_correctness(loss):
    y_bin = y.copy()
    y_bin[y != 1] = -1

    reg1 = MyPassiveAggressive(
        C=1.0, loss=loss, fit_intercept=True, n_iter=2)
    reg1.fit(X, y_bin)

    for data in (X, X_csr):
        reg2 = PassiveAggressiveRegressor(
            C=1.0, tol=None, loss=loss, fit_intercept=True, max_iter=2,
            shuffle=False)
        reg2.fit(data, y_bin)
        assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
Example 4: fancy_text_model
def fancy_text_model(x_train, y_train, x_test, x_valid, cache_name, use_cache=False):
    if use_cache:
        # Pickle files must be opened in binary mode.
        with open(cache_name, 'rb') as fhand:
            data_dict = pickle.load(fhand)
        return data_dict['test_pred'], data_dict['valid_pred']
    np.random.seed(seed=123)
    model = PassiveAggressiveRegressor(n_iter=100, C=1, shuffle=True, random_state=123)
    model.fit(x_train, y_train)
    test_pred = model.predict(x_test)
    valid_pred = model.predict(x_valid)
    data_dict = {'test_pred': test_pred, 'valid_pred': valid_pred}
    with open(cache_name, 'wb') as fhand:
        pickle.dump(data_dict, fhand)
    return test_pred, valid_pred
Example 5: test_regressor_correctness
def test_regressor_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        reg1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        reg1.fit(X, y_bin)

        reg2 = PassiveAggressiveRegressor(C=1.0,
                                          loss=loss,
                                          fit_intercept=True,
                                          n_iter=2)
        reg2.fit(X, y_bin)

        assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
Example 6: test_regressor_partial_fit
def test_regressor_partial_fit():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for average in (False, True):
            reg = PassiveAggressiveRegressor(
                C=1.0, fit_intercept=True, random_state=0,
                average=average, max_iter=100)
            for t in range(50):
                reg.partial_fit(data, y_bin)
            pred = reg.predict(data)
            assert_less(np.mean((pred - y_bin) ** 2), 1.7)
            if average:
                assert hasattr(reg, 'average_coef_')
                assert hasattr(reg, 'average_intercept_')
                assert hasattr(reg, 'standard_intercept_')
                assert hasattr(reg, 'standard_coef_')
Example 7: test_regressor_mse
def test_regressor_mse():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for data in (X, X_csr):
        for fit_intercept in (True, False):
            for average in (False, True):
                reg = PassiveAggressiveRegressor(
                    C=1.0, fit_intercept=fit_intercept,
                    random_state=0, average=average, max_iter=5)
                reg.fit(data, y_bin)
                pred = reg.predict(data)
                assert_less(np.mean((pred - y_bin) ** 2), 1.7)
                if average:
                    assert_true(hasattr(reg, 'average_coef_'))
                    assert_true(hasattr(reg, 'average_intercept_'))
                    assert_true(hasattr(reg, 'standard_intercept_'))
                    assert_true(hasattr(reg, 'standard_coef_'))
Example 8: refit_from_scratch
def refit_from_scratch(self):
    temp_model = PassiveAggressiveRegressor()
    temp_enc = CountVectorizer()
    X = []  # binary matrix of tag presence
    Z = []  # additional numerical data
    Y = []  # target (to predict) values
    db_size = self.db.size()
    for data in self.db.yield_all():
        feedback = data["feedback"]
        tags = data["tags"]
        if feedback and tags:
            Y.append(feedback)
            X.append(" ".join(tags))
            Z.append(self.fmt_numerical(data))
    X = temp_enc.fit_transform(X)
    X = hstack((X, coo_matrix(Z)))
    self.allX = X
    for i in range(X.shape[0]):
        # Feed one row at a time; the target must be the matching Y[i], not Y[0].
        temp_model.partial_fit(X.getrow(i), [Y[i]])
    self.model = temp_model
    self.enc = temp_enc
Example 9: PassiveAggressiveRegressor
tfscaler = preprocessing.StandardScaler().fit(topicsfollowers)

quesparse = quevectorizer.fit_transform(question)
topsparse = topvectorizer.fit_transform(topics)
cfscaled = cfscaler.transform(contextfollowers)
tfscaled = tfscaler.transform(topicsfollowers)

tquesparse = quevectorizer.transform(tquestion)
ttopsparse = topvectorizer.transform(ttopics)
tcfscaled = cfscaler.transform(tcontextfollowers)
ttfscaled = tfscaler.transform(ttopicsfollowers)

par = PassiveAggressiveRegressor()
par.fit(topsparse, y)
pred = par.predict(ttopsparse)
pred[pred < 0] = 0

temp = pl.figure("train y")
temp = pl.subplot(2, 1, 1)
temp = pl.hist(y, 1000)
temp = pl.subplot(2, 1, 2)
yy = y.copy()
yy[yy == 0] = 1
temp = pl.hist(np.log10(yy), 1000)

temp = pl.figure("test y")
temp = pl.subplot(4, 1, 1)
Example 10: make_scorer
spearman = make_scorer(spearm_cor_func, greater_is_better=True)

# Assemble prediction variables
X_train = X_train_pre.loc[:, important_features_top_100]
X_test = X_test_pre.loc[:, important_features_top_100]

for gene in prioritized_genes:
    y_train = train_ess.ix[:, gene]

    y_preds_test = []
    y_preds_scores = []

    # Training
    cv = ShuffleSplit(len(y_train), n_iter=5)
    for train_i, test_i in cv:
        clf = PassiveAggressiveRegressor(epsilon=0.01, n_iter=7).fit(X_train.ix[train_i, :], y_train[train_i])
        y_preds_scores.append(spearm_cor_func(clf.predict(X_train.ix[test_i, :]), y_train[test_i]))
        y_preds_test.append(clf.predict(X_test))

    y_preds_scores = Series(y_preds_scores)
    y_preds_test = DataFrame(y_preds_test)

    # Predict
    y_pred = np.mean(y_preds_test[y_preds_scores.notnull()], axis=0).values

    print gene, X_train.shape

    # Store results
    predictions.ix[gene] = y_pred

filename_gct = save_gct_data(predictions, submission_filename_prefix)
Example 11: VarianceThreshold
# Filter by coefficient of variation
var_thres = VarianceThreshold(best_var).fit(X_train_pre)
X_train_pre = var_thres.transform(X_train_pre)
X_test_pre = var_thres.transform(X_test_pre)

for gene in genes:
    # Assemble prediction variables
    X_train = X_train_pre
    y_train = train_ess.ix[:, gene]
    X_test = X_test_pre

    # Feature selection
    fs = SelectKBest(f_regression, k=best_k).fit(X_train, y_train)
    X_train = fs.transform(X_train)
    X_test = fs.transform(X_test)

    # Estimation
    clf = PassiveAggressiveRegressor(epsilon=best_epsilon, n_iter=best_n_iter).fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # Store results
    predictions.ix[gene] = y_pred

    print gene

filename = save_gct_data(predictions, submission_filename_prefix)
print '[DONE]: Saved to file ' + filename
submit_solution(filename, filename.split('/')[1], ev_code_sc1)
print '[SUBMITTED]'
Example 12: ElasticNet
br.fit(x, y)
br_sts_scores = br.predict(xt)
# Elastic Net
print 'elastic net'
enr = ElasticNet()
#enr.fit(x[:, np.newaxis], y)
#enr_sts_scores = enr.predict(xt[:, np.newaxis])
enr.fit(x, y)
enr_sts_scores = enr.predict(xt)
# Passive Aggressive Regression
print 'passive aggressive'
par = PassiveAggressiveRegressor()
par.fit(x, y)
par_sts_scores = par.predict(xt)
#par.fit(x[:, np.newaxis], y)
#par_sts_scores = par.predict(xt[:, np.newaxis])
# RANSAC Regression
print 'ransac'
ransac = RANSACRegressor()
#ransac.fit(x[:, np.newaxis], y)
#ransac_sts_scores = ransac.predict(xt[:, np.newaxis])
ransac.fit(x, y)
ransac_sts_scores = ransac.predict(xt)
# Logistic Regression
Example 13: main
def main():
    X, y, coef = make_regression(1000, 200, 10, 1, noise=0.05, coef=True,
                                 random_state=42)
    # X = np.column_stack((X, np.ones(X.shape[0])))
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                        random_state=42)
    # sca = StandardScaler()
    # sca.fit(X_train)
    # X_train = sca.transform(X_train)
    # X_test = sca.transform(X_test)
    # print X.shape
    # print y.shape
    # print coef.shape
    param_grid = {
        "C": [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2,
              0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10, 100, 1000],
        "epsilon": [0.0001, 0.001, 0.01, 0.1]}
    param_grid_kern = {
        "C": [0.0000001, 0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.2,
              0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10, 100, 1000],
        "epsilon": [0.0001, 0.001, 0.01, 0.1],
        "gamma": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100]}
    # "loss": ["pa", "pai", "paii"]}
    my_pa = PARegressor(loss="paii", C=1, epsilon=0.001, n_iter=1,
                        fit_intercept=False)
    # search = GridSearchCV(my_pa, param_grid,
    #     scoring='mean_absolute_error', n_jobs=8, iid=True, refit=True,
    #     cv=5, verbose=1)
    # search.fit(X_train, y_train)
    # print search.best_params_
    my_pa.fit(X_train, y_train)
    print my_pa.coef_
    # y_preds = search.predict(X_test)
    y_preds = my_pa.predict(X_test)
    mae_my_pa = mean_absolute_error(y_test, y_preds)
    print "My PA MAE = %2.4f" % mae_my_pa

    my_kpa_linear = KernelPARegressor(kernel="linear", loss="paii", C=1,
                                      epsilon=0.001, n_iter=1, fit_intercept=False)
    my_kpa_linear.fit(X_train, y_train)
    print "alphas", len(my_kpa_linear.alphas_), my_kpa_linear.alphas_
    y_preds = my_kpa_linear.predict(X_test)
    mae_kpa_linear = mean_absolute_error(y_test, y_preds)
    print "My KPA linear MAE = %2.4f" % mae_kpa_linear

    my_kpa_rbf = KernelPARegressor(kernel="rbf", loss="paii", gamma=0.001, C=1,
                                   epsilon=0.001, n_iter=1, fit_intercept=False)
    # search = GridSearchCV(my_kpa_rbf, param_grid_kern,
    #     scoring='mean_absolute_error', n_jobs=8, iid=True, refit=True,
    #     cv=5, verbose=1)
    # search.fit(X_train, y_train)
    my_kpa_rbf.fit(X_train, y_train)
    print "alphas", len(my_kpa_rbf.alphas_), my_kpa_rbf.alphas_
    print "support", len(my_kpa_rbf.support_)
    # print "alphas", len(search.best_estimator_.alphas_)  # , my_kpa_rbf.alphas_
    # print "support", len(search.best_estimator_.support_)
    # print search.best_params_
    y_preds = my_kpa_rbf.predict(X_test)
    # y_preds = search.predict(X_test)
    mae_my_kpa = mean_absolute_error(y_test, y_preds)
    print "My Kernel PA MAE = %2.4f" % mae_my_kpa
    # print search.best_estimator_
    # print np.corrcoef(search.best_estimator_.coef_, coef)

    # param_grid = {
    #     "C": [0.001, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,
    #           1.0, 10, 100, 1000, 10000],
    #     "epsilon": [0.0001, 0.001, 0.01, 0.1],
    #     # "loss": ["epsilon_insensitive", "squared_epsilon_insensitive"]}
    #     "loss": ["squared_epsilon_insensitive"]}
    # search = GridSearchCV(PassiveAggressiveRegressor(fit_intercept=True),
    #     param_grid, scoring='mean_absolute_error', n_jobs=8, iid=True,
    #     refit=True, cv=5, verbose=1)
    # search.fit(X_train, y_train)
    sk_pa = PassiveAggressiveRegressor(loss="squared_epsilon_insensitive", C=1,
                                       epsilon=0.001, n_iter=1,
                                       fit_intercept=False,
                                       warm_start=True)
    for i in xrange(X_train.shape[0]):
        # for x_i, y_i in zip(X_train, y_train):
        x = np.array(X_train[i], ndmin=2)
        y = np.array(y_train[i], ndmin=1)
        # print x.shape
        # print y
        sk_pa.partial_fit(x, y)
# ... (remainder of this example omitted) ...
Example 14: train_test_split
Xtrain = sp.hstack((Xtrain, sp.csr_matrix(sent_df[['polarity', 'subjectivity']].values)))
Xtest = sp.hstack((sp.coo_matrix(test_category_df.values), comm_test))
Xtest = sp.hstack((Xtest, sp.csr_matrix(test_sent_df[['polarity', 'subjectivity']].values)))
Ytrain = np.ravel(quality_df['quality'])
# Ytest = np.ravel(test_quality_df['quality'])

Xtr, Xte, Ytr, Yte = train_test_split(Xtrain, Ytrain, test_size=.25, random_state=0)
ids = test_ids.id

print("Training Models")
m1 = Ridge(normalize=True, alpha=0.001, solver='auto')
m2 = Lasso(normalize=False, alpha=0.0001, selection='cyclic', positive=False)
m3 = ElasticNet(normalize=False, alpha=0.0001, positive=False, l1_ratio=0.2)
m4 = PassiveAggressiveRegressor(epsilon=0.001, C=100, shuffle=True)
m5 = LinearRegression()

m1.fit(Xtrain, Ytrain)
print("Model 1 Finished")
m2.fit(Xtrain, Ytrain)
print("Model 2 Finished")
m3.fit(Xtrain, Ytrain)
print("Model 3 Finished")
m4.fit(Xtrain, Ytrain)
print("Model 4 Finished")
m5.fit(Xtrain, Ytrain)
print("Model 5 Finished")

models = [m1, m2, m3, m4, m5]
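The excerpt ends right after assembling the five fitted models. As a hedged continuation (my own sketch, not part of the original example), one natural next step would be to compare the models on the held-out Xtr/Xte split created by train_test_split above, refitting on Xtr so the evaluation is honest:

from sklearn.metrics import mean_squared_error

# Hypothetical continuation: refit each model on the training split
# and score it on the held-out split.
for i, m in enumerate(models, start=1):
    m.fit(Xtr, Ytr)
    mse = mean_squared_error(Yte, m.predict(Xte))
    print("Model %d held-out MSE: %.4f" % (i, mse))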