This article collects typical usage examples of the Python method sklearn.linear_model.BayesianRidge.fit. If you are wondering what BayesianRidge.fit does or how to use it, the curated code examples below may help. You can also read more about the class it belongs to, sklearn.linear_model.BayesianRidge.
The following shows 15 code examples of BayesianRidge.fit, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python examples.
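Before the collected examples, here is a minimal self-contained sketch of the usual fit/predict workflow with BayesianRidge (synthetic data and default hyperparameters; this sketch is not taken from any of the examples below):

import numpy as np
from sklearn.linear_model import BayesianRidge

# Synthetic regression data: 100 samples, 5 features (illustrative only).
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = X @ np.array([1.5, -2.0, 0.0, 0.5, 3.0]) + 0.1 * rng.randn(100)

model = BayesianRidge()
model.fit(X, y)              # estimates the weights and the noise/weight precisions
print(model.coef_)           # posterior mean of the weights
print(model.predict(X[:3]))  # predictions for the first three samples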
Example 1: ridreg
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: import pandas as pd
def ridreg(df, test):
    clf = BayesianRidge()
    target = df['count']
    train = df[['time', 'temp']]
    test = test[['time', 'temp']]  # the original referenced an undefined `test2` here
    clf.fit(train, target)
    final = []
    print(test.head(3))
    for i, row in enumerate(test.values):
        y = []
        for x in row:
            x = float(x)
            y.append(x)
            # print(x)
        final.append(y)
    predicted_probs = clf.predict(final)
    # print(predicted_probs.shape)
    # predicted_probs = pd.Series(predicted_probs)
    # predicted_probs = predicted_probs.map(lambda x: int(x))
    keep = pd.read_csv('data/test.csv')
    keep = keep['datetime']  # loaded but not joined to the output in this excerpt
    # save to file
    predicted_probs = pd.DataFrame(predicted_probs)
    print(predicted_probs.head(3))
    predicted_probs.to_csv('data/submission3.csv', index=False)
Example 2: bayes_ridge_reg
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# get_accuracy is a helper defined elsewhere in the original project
def bayes_ridge_reg(self):
    br = BayesianRidge()
    br.fit(self.x_data, self.y_data)
    adjusted_result = br.predict(self.x_data)
    print("bayes ridge params", br.coef_, br.intercept_)
    print("bayes ridge accuracy", get_accuracy(adjusted_result, self.y_data))
    return list(map(int, adjusted_result))
Example 3: bayesian_ridge_regression
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: from sklearn.linear_model import LinearRegression; import matplotlib.pyplot as plt
def bayesian_ridge_regression(feature_array, label_array):
    clf = BayesianRidge(compute_score=True)
    clf.fit(feature_array, label_array)
    ols = LinearRegression()
    ols.fit(feature_array, label_array)
    n_features = 9

    plt.figure(figsize=(6, 5))
    plt.title("Weights of the model")
    plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
    plt.plot(label_array, 'g-', label="Ground truth")
    plt.plot(ols.coef_, 'r--', label="OLS estimate")
    plt.xlabel("Features")
    plt.ylabel("Values of the weights")
    plt.legend(loc="best", prop=dict(size=12))

    plt.figure(figsize=(6, 5))
    plt.title("Histogram of the weights")
    plt.hist(clf.coef_, bins=n_features, log=True)
    # plt.plot(clf.coef_[feature_array], 5 * np.ones(len(feature_array)),
    #          'ro', label="Relevant features")
    plt.ylabel("Features")
    plt.xlabel("Values of the weights")
    plt.legend(loc="lower left")

    plt.figure(figsize=(6, 5))
    plt.title("Marginal log-likelihood")
    plt.plot(clf.scores_)
    plt.ylabel("Score")
    plt.xlabel("Iterations")
    plt.show()
Example 4: bayesRegr
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
def bayesRegr(source, target):
    # Binarize source
    clf = BayesianRidge()
    features = source.columns[:-1]
    klass = source[source.columns[-1]]
    clf.fit(source[features], klass)
    preds = clf.predict(target[target.columns[:-1]])
    return preds
Example 5: fit_model_10
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: import pickle; logloss and self.cv_data come from the original project
def fit_model_10(self, toWrite=False):
    model = BayesianRidge(n_iter=5000)
    for data in self.cv_data:
        X_train, X_test, Y_train, Y_test = data
        model.fit(X_train, Y_train)
        pred = model.predict(X_test)
        print("Model 10 score %f" % (logloss(Y_test, pred),))
    if toWrite:
        f2 = open('model10/model.pkl', 'wb')  # binary mode is required for pickle
        pickle.dump(model, f2)
        f2.close()
Example 6: train_BayesianRegressionModel
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
def train_BayesianRegressionModel(
    X,
    y,
    n_iter=300,
    tol=0.001,
    alpha_1=1e-06,
    alpha_2=1e-06,
    lambda_1=1e-06,
    lambda_2=1e-06,
    compute_score=False,
    fit_intercept=True,
    normalize=False,
    copy_X=True,
    verbose=False,
):
    """
    Train a Bayesian regression model.
    """
    model = BayesianRidge(
        n_iter=n_iter,
        tol=tol,
        alpha_1=alpha_1,
        alpha_2=alpha_2,
        lambda_1=lambda_1,
        lambda_2=lambda_2,
        compute_score=compute_score,
        fit_intercept=fit_intercept,
        normalize=normalize,
        copy_X=copy_X,
        verbose=verbose,
    )
    model = model.fit(X, y)
    return model
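A hypothetical call to the wrapper above, not part of the original example. Note that it forwards n_iter and normalize to BayesianRidge, so it assumes an older scikit-learn release (normalize was removed in 1.2 and n_iter was later replaced by max_iter):

import numpy as np

# Illustrative data; train_BayesianRegressionModel is the function defined above.
rng = np.random.RandomState(42)
X = rng.randn(50, 3)
y = X @ np.array([2.0, 0.0, -1.0]) + 0.05 * rng.randn(50)

model = train_BayesianRegressionModel(X, y, compute_score=True)
print(model.coef_, model.intercept_)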
Example 7: br_modeling
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: joblib and datetime are imported; get_variables and rf_trim are project helpers
def br_modeling(data, y_name, candidates_location):
    from sklearn.linear_model import BayesianRidge
    temp = data.copy()
    candidates = get_variables("./%s" % candidates_location)
    temp = rf_trim(temp, y_name, candidates)
    model = BayesianRidge()
    res = model.fit(temp[candidates], temp[y_name])
    joblib.dump(res, "./%sbr_model%s.pkl" % (y_name, datetime.datetime.today()))
    return res
Example 8: fit_polynomial_bayesian_skl
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: import numpy as np; import numpy.polynomial.polynomial as pol
def fit_polynomial_bayesian_skl(X, Y, degree,
                                lambda_shape=1.e-6, lambda_invscale=1.e-6,
                                padding=10, n=100,
                                X_unknown=None):
    X_v = pol.polyvander(X, degree)

    clf = BayesianRidge(lambda_1=lambda_shape, lambda_2=lambda_invscale)
    clf.fit(X_v, Y)

    coeff = np.copy(clf.coef_)
    # There is some odd handling of the intercept here: since the Vandermonde
    # matrix has a leading column of ones, just fold the intercept into the
    # first coefficient.
    coeff[0] += clf.intercept_

    ret_ = [coeff]

    # generate the line
    x = np.linspace(X.min() - padding, X.max() + padding, n)
    x_v = pol.polyvander(x, degree)

    # using the provided predict method
    y_1 = clf.predict(x_v)
    # using np.dot() with coeff
    y_2 = np.dot(x_v, coeff)

    ret_.append(((x, y_1), (x, y_2)))

    if X_unknown is not None:
        xu_v = pol.polyvander(X_unknown, degree)
        # using the predict method
        yu_1 = clf.predict(xu_v)
        # using np.dot() with coeff
        yu_2 = np.dot(xu_v, coeff)
        ret_.append(((X_unknown, yu_1), (X_unknown, yu_2)))

    return ret_
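A hypothetical call to the helper above (the data and variable names are illustrative, not from the original source). With X_unknown left as None, the function returns the coefficient vector plus the predicted curve computed two equivalent ways:

import numpy as np

rng = np.random.RandomState(1)
X = np.linspace(-3, 3, 40)
Y = 0.5 * X**2 - X + rng.normal(scale=0.3, size=X.shape)

coeff, ((x_line, y_pred), (_, y_dot)) = fit_polynomial_bayesian_skl(X, Y, degree=2)
print(coeff)                       # intercept-folded polynomial coefficients
print(np.allclose(y_pred, y_dot))  # the two prediction paths should agree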
Example 9: train_classiifer
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: SVR and RandomizedSearchCV are imported from sklearn, and sp_rand
# is a scipy.stats distribution (e.g. uniform) imported in the original project
def train_classiifer(X_train, y_train, to_tune, classifier):
    # Initialize the regressor. Note that the second assignment overrides the
    # first, so the SVR is what actually gets used unless `classifier` is given.
    clf = BayesianRidge()
    clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    # clf = RandomForestRegressor()
    if classifier:
        clf = classifier
        to_tune = False
    if to_tune:
        # Randomized search: find good estimator parameters.
        # The second param_grid overrides the first (SVR parameters are tuned).
        param_grid = {'alpha_1': sp_rand(), 'alpha_2': sp_rand()}
        param_grid = {'C': sp_rand(), 'gamma': sp_rand()}
        rsearch = RandomizedSearchCV(estimator=clf,
                                     param_distributions=param_grid, n_iter=5000)
        rsearch.fit(X_train, y_train)
        # Use the tuned estimator.
        clf = rsearch.best_estimator_
    # Train the estimator.
    clf.fit(X_train, y_train)
    return clf
Example 10: build_bayesian_rr
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: import sklearn.metrics; import pickle
def build_bayesian_rr(x_train, y_train, x_test, y_test, n_features):
    """
    Construct a Bayesian ridge regression model from the input dataframes.
    :param x_train: features dataframe for model training
    :param y_train: target dataframe for model training
    :param x_test: features dataframe for model testing
    :param y_test: target dataframe for model testing
    :return: None
    """
    clf = BayesianRidge()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    # Mean absolute error regression loss
    mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
    # Mean squared error regression loss
    mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
    # Median absolute error regression loss
    median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
    # R^2 (coefficient of determination) regression score function
    r2 = sklearn.metrics.r2_score(y_test, y_pred)
    # Explained variance regression score function
    exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
    # Estimated noise precision (alpha_) of the fitted model; not written to the file below
    ridge_alpha = clf.alpha_
    with open('../trained_networks/brr_%d_data.pkl' % n_features, 'wb') as results:
        pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)
    return
Example 11: br_modeling
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: joblib and datetime are imported; get_variables and rf_trim are project helpers
def br_modeling(data, y_name, candidates_location):
    from sklearn.linear_model import BayesianRidge
    temp = data.copy()
    print("made temp copy")
    candidates = get_variables("./%s" % candidates_location)
    print("got candidates for regressors")
    temp = rf_trim(temp, y_name, candidates)
    print("trimmed dataset")
    model = BayesianRidge()
    print("assigned model")
    res = model.fit(temp[candidates], temp[y_name])
    print("fit model")
    joblib.dump(res, "./%sbr_model%s.pkl" % (y_name, datetime.datetime.today()))
    print("saved model")
    return res
Example 12: bayes_ridge_reg
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
def bayes_ridge_reg(x_data, y_data):
    br = BayesianRidge()
    br.fit(x_data, y_data)
    print("br params", br.coef_, br.intercept_)
    adjusted_result = br.predict(x_data)
    return list(map(int, adjusted_result))
Example 13: scale
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: import pandas as pd; from sklearn.preprocessing import scale
df = pd.concat(frames, axis=0, ignore_index=True)

### Imputing DYAR
train = df[(df.DYAR.isnull() == False) & (df.pct_team_tgts.isnull() == False)]
train.reset_index(inplace=True, drop=True)
test = df[(df.DYAR.isnull() == True) & (df.pct_team_tgts.isnull() == False)]
test.reset_index(inplace=True, drop=True)

features = ['targets', 'receptions', 'rec_tds', 'start_ratio', 'pct_team_tgts',
            'pct_team_receptions', 'pct_team_touchdowns', 'rec_yards', 'dpi_yards',
            'fumbles', 'first_down_ctchs', 'pct_of_team_passyards']
X = scale(train[features])
y = train.DYAR

# Our best model for predicting DYAR was a Bayesian Ridge Regressor
br = BayesianRidge()
br.fit(X, y)
dyar_predictions = pd.DataFrame(br.predict(scale(test[features])), columns=['DYAR_predicts'])
test = test.join(dyar_predictions)
test['DYAR'] = test['DYAR_predicts']
test.drop('DYAR_predicts', inplace=True, axis=1)

frames = [train, test]
df = pd.concat(frames, axis=0, ignore_index=True)

### Imputing EYds
train = df[(df.EYds.isnull() == False) & (df.pct_team_tgts.isnull() == False)]
train.reset_index(inplace=True, drop=True)
test = df[(df.EYds.isnull() == True) & (df.pct_team_tgts.isnull() == False)]
test.reset_index(inplace=True, drop=True)
Example 14: LinearRegression
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes: LinearRegression, ElasticNet and PassiveAggressiveRegressor are imported
# from sklearn.linear_model, and x, y, xt are defined earlier in the original script
# Linear Regression
print('linear')
lr = LinearRegression()
# lr.fit(x[:, np.newaxis], y)
# lr_sts_scores = lr.predict(xt[:, np.newaxis])
lr.fit(x, y)
lr_sts_scores = lr.predict(xt)

# Bayesian Ridge Regression
print('bayesian ridge')
br = BayesianRidge(compute_score=True)
# br.fit(x[:, np.newaxis], y)
# br_sts_scores = br.predict(xt[:, np.newaxis])
br.fit(x, y)
br_sts_scores = br.predict(xt)

# Elastic Net
print('elastic net')
enr = ElasticNet()
# enr.fit(x[:, np.newaxis], y)
# enr_sts_scores = enr.predict(xt[:, np.newaxis])
enr.fit(x, y)
enr_sts_scores = enr.predict(xt)

# Passive Aggressive Regression
print('passive aggressive')
par = PassiveAggressiveRegressor()
Example 15: main
# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# Or: from sklearn.linear_model.BayesianRidge import fit [as alias]
# Also assumes (from the original script): argparse, codecs, sys, numpy as np,
# nltk.corpus.stopwords, sklearn's preprocessing and cross_validation modules
# (an older scikit-learn release), sklearn.metrics.pairwise.cosine_similarity,
# and the project helpers embedfeats() and roundup()
def main():
    parser = argparse.ArgumentParser(description="""Creates embeddings predictions.""")
    parser.add_argument('--train')
    parser.add_argument('--test')
    parser.add_argument('--embeddings')
    parser.add_argument('--cv', default=False)
    args = parser.parse_args()

    stoplist = stopwords.words("english")
    stoplist.extend("it's 've 's i'm he's she's you're we're they're i'll you'll he'll ".split(" "))

    embeddings = {}
    for line in codecs.open(args.embeddings, encoding="utf-8").readlines():
        line = line.strip()
        if line:
            a = line.split(" ")
            embeddings[a[0]] = np.array([float(v) for v in a[1:]])  # cast to float, otherwise we cannot operate

    train_indices = []
    test_indices = []
    train_scores = []
    train_features = []
    test_features = []

    # if args.learner == "logisticregression":
    #     learner = LogisticRegression()
    #     learner_type = "classification"
    # elif args.learner == "decisiontreeclassification":
    #     learner = tree.DecisionTreeClassifier()
    #     learner_type = "classification"
    # elif args.learner == "decisiontreeregression":
    #     learner = tree.DecisionTreeRegressor()
    #     learner_type = "regression"
    # elif args.learner == "bayesianridge":
    #     learner = BayesianRidge()
    #     learner_type = "regression"
    # else:
    learner = BayesianRidge()
    learner_type = "regression"

    le = preprocessing.LabelEncoder()

    for line in open(args.train).readlines():
        (index, score, tweet) = line.strip().split("\t")
        train_indices.append(index)
        train_scores.append(float(score))
        tweet = tweet.split(" ")
        train_features.append(embedfeats(tweet, embeddings, stoplist))

    train_indices = np.array(train_indices)
    train_scores = np.array(train_scores)
    train_features = np.array(train_features)

    train_scores_int = [roundup(v) for v in train_scores]
    le.fit(train_scores_int)
    train_scores_int_transformed = le.transform(train_scores_int)

    if args.cv:
        train_cv = {}
        cross = cross_validation.KFold(len(train_scores), n_folds=10)
        acc = []
        for train_index, test_index in cross:
            # if args.debug:
            #     print("TRAIN:", len(train_index), "TEST:", len(test_index))
            X = train_features
            y = train_scores
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            learner.fit(X_train, y_train)
            y_pred = learner.predict(X_test)
            assert(len(y_pred) == len(test_index))
            tids = train_indices[test_index]
            for twid, pred in zip(tids, y_pred):
                train_cv[twid] = pred
            acc.append(cosine_similarity(y_test, y_pred)[0][0])
        print("Cosine of 10-folds:", acc, file=sys.stderr)
        print("Macro average:", np.mean(np.array(acc)), np.std(np.array(acc)), file=sys.stderr)
        for twid in train_indices:
            print("{}\t{}".format(twid, train_cv[twid]))
    else:
        for line in open(args.test).readlines():
            (index, score, tweet) = line.strip().split("\t")
            test_indices.append(index)
            # scores.append(score)
            tweet = tweet.split(" ")
# ......... the remainder of this example is omitted .........