This article collects typical usage examples of the Python class sklearn.linear_model.BayesianRidge. If you are wondering what BayesianRidge does, how to use it, or where to find examples of it in real code, the curated class examples below should help.
Fifteen BayesianRidge code examples are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
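Before diving into the examples, here is a minimal fit/predict sketch for orientation (the data is synthetic and the variable names are ours, not taken from any example below):

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.RandomState(0)
X = rng.rand(50, 3)
y = X @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.randn(50)

reg = BayesianRidge()
reg.fit(X, y)
y_mean, y_std = reg.predict(X, return_std=True)  # posterior predictive mean and standard deviation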
Example 1: train_BayesianRegressionModel
def train_BayesianRegressionModel(
    X,
    y,
    n_iter=300,
    tol=0.001,
    alpha_1=1e-06,
    alpha_2=1e-06,
    lambda_1=1e-06,
    lambda_2=1e-06,
    compute_score=False,
    fit_intercept=True,
    normalize=False,
    copy_X=True,
    verbose=False,
):
    """
    Train a Bayesian regression model
    """
    model = BayesianRidge(
        n_iter=n_iter,
        tol=tol,
        alpha_1=alpha_1,
        alpha_2=alpha_2,
        lambda_1=lambda_1,
        lambda_2=lambda_2,
        compute_score=compute_score,
        fit_intercept=fit_intercept,
        normalize=normalize,
        copy_X=copy_X,
        verbose=verbose,
    )
    model = model.fit(X, y)
    return model
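A hypothetical call to this wrapper might look as follows (X and y are synthetic here; note that newer scikit-learn releases rename n_iter to max_iter and remove the normalize argument, so this signature targets older versions):

import numpy as np

X = np.random.rand(100, 5)
y = X @ np.arange(1.0, 6.0) + 0.1 * np.random.randn(100)

model = train_BayesianRegressionModel(X, y, n_iter=500, compute_score=True)
print(model.coef_, model.intercept_)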
Example 2: bayesian_ridge_regression

def bayesian_ridge_regression(feature_array, label_array):
    clf = BayesianRidge(compute_score=True)
    clf.fit(feature_array, label_array)

    ols = LinearRegression()
    ols.fit(feature_array, label_array)

    n_features = 9

    plt.figure(figsize=(6, 5))
    plt.title("Weights of the model")
    plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
    plt.plot(label_array, 'g-', label="Ground truth")  # note: this plots the targets, not the true weights
    plt.plot(ols.coef_, 'r--', label="OLS estimate")
    plt.xlabel("Features")
    plt.ylabel("Values of the weights")
    plt.legend(loc="best", prop=dict(size=12))

    plt.figure(figsize=(6, 5))
    plt.title("Histogram of the weights")
    plt.hist(clf.coef_, bins=n_features, log=True)
    # plt.plot(clf.coef_[feature_array], 5 * np.ones(len(feature_array)),
    #          'ro', label="Relevant features")
    plt.ylabel("Features")
    plt.xlabel("Values of the weights")
    plt.legend(loc="lower left")

    plt.figure(figsize=(6, 5))
    plt.title("Marginal log-likelihood")
    plt.plot(clf.scores_)
    plt.ylabel("Score")
    plt.xlabel("Iterations")
    plt.show()
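One detail worth noting: the scores_ attribute plotted at the end (the log marginal likelihood per iteration) is only set when the estimator is constructed with compute_score=True, as it is here. A quick check, reusing the function's arguments:

clf = BayesianRidge(compute_score=True)
clf.fit(feature_array, label_array)
print(len(clf.scores_))  # scores_ does not exist if compute_score is left False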
Example 3: bayes_ridge_reg

def bayes_ridge_reg(self):
    br = BayesianRidge()
    br.fit(self.x_data, self.y_data)
    adjusted_result = br.predict(self.x_data)
    print("bayes ridge params", br.coef_, br.intercept_)
    print("bayes ridge accuracy", get_accuracy(adjusted_result, self.y_data))
    return list(map(int, adjusted_result))
Example 4: ridreg

def ridreg(df, test):
    clf = BayesianRidge()
    target = df['count']
    train = df[['time', 'temp']]
    test = test[['time', 'temp']]
    clf.fit(train, target)

    final = []
    print(test.head(3))
    for i, row in enumerate(test.values):
        y = []
        for x in row:
            x = float(x)
            y.append(x)
            # print(x)
        final.append(y)

    predicted_probs = clf.predict(final)
    # print(predicted_probs.shape)
    # predicted_probs = pd.Series(predicted_probs)
    # predicted_probs = predicted_probs.map(lambda x: int(x))

    keep = pd.read_csv('data/test.csv')
    keep = keep['datetime']

    # save to file
    predicted_probs = pd.DataFrame(predicted_probs)
    print(predicted_probs.head(3))
    predicted_probs.to_csv('data/submission3.csv', index=False)
Example 5: bayesRegr

def bayesRegr(source, target):
    # the last column of `source` holds the target; the rest are features
    clf = BayesianRidge()
    features = source.columns[:-1]
    klass = source[source.columns[-1]]
    clf.fit(source[features], klass)
    preds = clf.predict(target[target.columns[:-1]])
    return preds
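Unlike LinearRegression, BayesianRidge can also return a predictive standard deviation, which this example discards. A sketch of the extended call, assuming the same source/target layout as above:

preds, preds_std = clf.predict(target[target.columns[:-1]], return_std=True)
lower = preds - 1.96 * preds_std  # rough 95% predictive band
upper = preds + 1.96 * preds_std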
Example 6: br_modeling
def br_modeling(data, y_name, candidates_location):
    from sklearn.linear_model import BayesianRidge
    temp = data.copy()
    candidates = get_variables("./%s" % candidates_location)
    temp = rf_trim(temp, y_name, candidates)
    model = BayesianRidge()
    res = model.fit(temp[candidates], temp[y_name])
    joblib.dump(res, "./%sbr_model%s.pkl" % (y_name, datetime.datetime.today()))
    return res
Example 7: fit_model_10

def fit_model_10(self, toWrite=False):
    model = BayesianRidge(n_iter=5000)

    for data in self.cv_data:
        X_train, X_test, Y_train, Y_test = data
        model.fit(X_train, Y_train)
        pred = model.predict(X_test)
        print("Model 10 score %f" % (logloss(Y_test, pred),))

    if toWrite:
        f2 = open('model10/model.pkl', 'wb')  # pickle requires a binary-mode file handle
        pickle.dump(model, f2)
        f2.close()
Example 8: br_modeling
def br_modeling(data, y_name, candidates_location):
    from sklearn.linear_model import BayesianRidge
    temp = data.copy()
    print("made temp copy")
    candidates = get_variables("./%s" % candidates_location)
    print("got candidates for regressors")
    temp = rf_trim(temp, y_name, candidates)
    print("trimmed dataset")
    model = BayesianRidge()
    print("assigned model")
    res = model.fit(temp[candidates], temp[y_name])
    print("fit model")
    joblib.dump(res, "./%sbr_model%s.pkl" % (y_name, datetime.datetime.today()))
    print("saved model")
    return res
Example 9: fit_polynomial_bayesian_skl

def fit_polynomial_bayesian_skl(X, Y, degree,
                                lambda_shape=1.e-6, lambda_invscale=1.e-6,
                                padding=10, n=100,
                                X_unknown=None):
    X_v = pol.polyvander(X, degree)

    clf = BayesianRidge(lambda_1=lambda_shape, lambda_2=lambda_invscale)
    clf.fit(X_v, Y)

    coeff = np.copy(clf.coef_)
    # the intercept is reported separately; since the Vandermonde matrix
    # already starts with a constant column of ones, fold the intercept
    # into the first coefficient
    coeff[0] += clf.intercept_
    ret_ = [coeff]

    # generate the line
    x = np.linspace(X.min() - padding, X.max() + padding, n)
    x_v = pol.polyvander(x, degree)

    # using the provided predict method
    y_1 = clf.predict(x_v)
    # using np.dot() with coeff
    y_2 = np.dot(x_v, coeff)
    ret_.append(((x, y_1), (x, y_2)))

    if X_unknown is not None:
        xu_v = pol.polyvander(X_unknown, degree)
        # using the predict method
        yu_1 = clf.predict(xu_v)
        # using np.dot() with coeff
        yu_2 = np.dot(xu_v, coeff)
        ret_.append(((X_unknown, yu_1), (X_unknown, yu_2)))

    return ret_
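A hypothetical invocation on noisy quadratic data (this assumes the surrounding module imports numpy as np and numpy.polynomial.polynomial as pol, as the function body implies):

rng = np.random.RandomState(0)
X = rng.uniform(-5, 5, 30)
Y = 0.5 * X**2 - X + rng.normal(0, 2.0, 30)

coeff, ((x, y_pred), (_, y_dot)) = fit_polynomial_bayesian_skl(X, Y, degree=2)
print(coeff)  # polynomial coefficients with the intercept folded into coeff[0]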
Example 10: train_classiifer

def train_classiifer(X_train, y_train, to_tune, classifier):
    # Initialize classifier. Note: the SVR assignment below overrides the
    # BayesianRidge instance, so the SVR is what actually gets tuned and trained.
    clf = BayesianRidge()
    clf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    #clf = RandomForestRegressor()
    if classifier:
        clf = classifier
        to_tune = False
    if to_tune:
        # Grid search: find optimal classifier parameters.
        # The second assignment overrides the first, so only the SVR
        # parameters (C, gamma) are actually searched here.
        param_grid = {'alpha_1': sp_rand(), 'alpha_2': sp_rand()}
        param_grid = {'C': sp_rand(), 'gamma': sp_rand()}
        rsearch = RandomizedSearchCV(estimator=clf,
                                     param_distributions=param_grid, n_iter=5000)
        rsearch.fit(X_train, y_train)
        # Use tuned classifier.
        clf = rsearch.best_estimator_
    # Train classifier.
    clf.fit(X_train, y_train)
    return clf
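As the comments above note, only the SVR is actually tuned, because both clf and param_grid are reassigned. If the intent was to tune BayesianRidge itself, a minimal sketch might look like this (sp_rand is assumed to be scipy.stats.uniform, matching how the example uses it):

from scipy.stats import uniform as sp_rand
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import RandomizedSearchCV

param_grid = {'alpha_1': sp_rand(), 'alpha_2': sp_rand()}
rsearch = RandomizedSearchCV(estimator=BayesianRidge(),
                             param_distributions=param_grid, n_iter=50)
rsearch.fit(X_train, y_train)
print(rsearch.best_params_)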
Example 11: build_bayesian_rr

def build_bayesian_rr(x_train, y_train, x_test, y_test, n_features):
    """
    Construct a Bayesian ridge regression model from input dataframes
    :param x_train: features dataframe for model training
    :param y_train: target dataframe for model training
    :param x_test: features dataframe for model testing
    :param y_test: target dataframe for model testing
    :param n_features: number of features, used to name the output file
    :return: None
    """
    clf = BayesianRidge()
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)

    # Mean absolute error regression loss
    mean_abs = sklearn.metrics.mean_absolute_error(y_test, y_pred)
    # Mean squared error regression loss
    mean_sq = sklearn.metrics.mean_squared_error(y_test, y_pred)
    # Median absolute error regression loss
    median_abs = sklearn.metrics.median_absolute_error(y_test, y_pred)
    # R^2 (coefficient of determination) regression score function
    r2 = sklearn.metrics.r2_score(y_test, y_pred)
    # Explained variance regression score function
    exp_var_score = sklearn.metrics.explained_variance_score(y_test, y_pred)
    # Estimated precision of the noise; BayesianRidge infers alpha_ during
    # fitting rather than selecting it by cross-validation
    ridge_alpha = clf.alpha_

    with open('../trained_networks/brr_%d_data.pkl' % n_features, 'wb') as results:
        pickle.dump(clf, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(mean_sq, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(median_abs, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(r2, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(exp_var_score, results, pickle.HIGHEST_PROTOCOL)
        pickle.dump(y_pred, results, pickle.HIGHEST_PROTOCOL)

    return
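The pickled results can be read back with sequential pickle.load calls in the same order as the dumps (a sketch; the path and n_features mirror the function above):

import pickle

with open('../trained_networks/brr_%d_data.pkl' % n_features, 'rb') as results:
    clf = pickle.load(results)
    mean_abs = pickle.load(results)
    mean_sq = pickle.load(results)
    median_abs = pickle.load(results)
    r2 = pickle.load(results)
    exp_var_score = pickle.load(results)
    y_pred = pickle.load(results)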
Example 12: len

xt = x
#print(len(_x), len(x), len(y))

# Linear Regression
print('linear')
lr = LinearRegression()
#lr.fit(x[:, np.newaxis], y)
#lr_sts_scores = lr.predict(xt[:, np.newaxis])
lr.fit(x, y)
lr_sts_scores = lr.predict(xt)

# Bayesian Ridge Regression
print('bayesian ridge')
br = BayesianRidge(compute_score=True)
#br.fit(x[:, np.newaxis], y)
#br_sts_scores = br.predict(xt[:, np.newaxis])
br.fit(x, y)
br_sts_scores = br.predict(xt)

# Elastic Net
print('elastic net')
enr = ElasticNet()
#enr.fit(x[:, np.newaxis], y)
#enr_sts_scores = enr.predict(xt[:, np.newaxis])
enr.fit(x, y)
enr_sts_scores = enr.predict(xt)
Example 13: main

#......... (part of the code omitted here) .........
    # write them into the one-hot coding
    motifs_seq[:, left_i:left_i+filter_len] = filter_consensus[i]
    motifs_seq[:, right_i:right_i+filter_len] = filter_consensus[j]

    # save
    seqs_1hot.append(motifs_seq)

    # make a full array
    seqs_1hot = np.array(seqs_1hot)

    # reshape for spatial
    seqs_1hot = seqs_1hot.reshape((seqs_1hot.shape[0], 4, 1, options.seq_length))

    #################################################################
    # place filter consensus motifs
    #################################################################
    # save to HDF5
    seqs_file = '%s/motif_seqs.h5' % options.out_dir
    h5f = h5py.File(seqs_file, 'w')
    h5f.create_dataset('test_in', data=seqs_1hot)
    h5f.close()

    # predict scores
    scores_file = '%s/motif_seqs_scores.h5' % options.out_dir
    torch_cmd = 'th basset_place2_predict.lua %s %s %s %s' % (cuda_str, model_file, seqs_file, scores_file)
    subprocess.call(torch_cmd, shell=True)

    # load in scores
    hdf5_in = h5py.File(scores_file, 'r')
    motif_seq_scores = np.array(hdf5_in['scores'])
    hdf5_in.close()

    #################################################################
    # analyze
    #################################################################
    for ti in out_targets:
        #################################################################
        # compute pairwise expectations
        #################################################################
        # X = np.zeros((motif_seq_scores.shape[0], num_filters))
        # xi = 0
        # for i in range(num_filters):
        #     for j in range(num_filters):
        #         X[xi, i] += 1
        #         X[xi, j] += 1
        #         xi += 1

        X = np.zeros((motif_seq_scores.shape[0], 2 * num_filters))
        xi = 0
        for i in range(num_filters):
            for j in range(num_filters):
                X[xi, i] += 1
                X[xi, num_filters + j] += 1
                xi += 1

        # fit model
        model = BayesianRidge()
        model.fit(X, motif_seq_scores[:, ti])

        # predict pairwise expectations
        motif_seq_preds = model.predict(X)
        print(model.score(X, motif_seq_scores[:, ti]))

        # print filter coefficients
        coef_out = open('%s/coefs_t%d.txt' % (options.out_dir, ti), 'w')
        for i in range(num_filters):
            print('%3d %6.2f' % (i, model.coef_[i]), file=coef_out)
        coef_out.close()

        #################################################################
        # normalize pairwise predictions
        #################################################################
        filter_interaction = np.zeros((num_filters, num_filters))
        table_out = open('%s/table_t%d.txt' % (options.out_dir, ti), 'w')
        si = 0
        for i in range(num_filters):
            for j in range(num_filters):
                filter_interaction[i, j] = motif_seq_scores[si, ti] - motif_seq_preds[si]
                cols = (i, j, motif_seq_scores[si, ti], motif_seq_preds[si], filter_interaction[i, j])
                print('%3d %3d %6.3f %6.3f %6.3f' % cols, file=table_out)
                si += 1
        table_out.close()

        scores_abs = abs(filter_interaction.flatten())
        max_score = stats.quantile(scores_abs, .999)
        print('Limiting scores to +-%f' % max_score)
        filter_interaction_max = np.zeros((num_filters, num_filters))
        for i in range(num_filters):
            for j in range(num_filters):
                filter_interaction_max[i, j] = np.min([filter_interaction[i, j], max_score])
                filter_interaction_max[i, j] = np.max([filter_interaction_max[i, j], -max_score])

        # plot heat map
        plt.figure()
        sns.heatmap(filter_interaction_max, xticklabels=False, yticklabels=False)
        plt.savefig('%s/heat_t%d.pdf' % (options.out_dir, ti))
Example 14: main

def main():
    usage = 'usage: %prog [options] <repr_hdf5> <data_hdf5> <target_index>'
    parser = OptionParser(usage)
    parser.add_option('-a', dest='add_only', default=False, action='store_true', help='Use additional features only; no sequence features')
    parser.add_option('-b', dest='balance', default=False, action='store_true', help='Downsample the negative set to balance [Default: %default]')
    parser.add_option('-o', dest='out_dir', default='postmodel', help='Output directory [Default: %default]')
    parser.add_option('-r', dest='regression', default=False, action='store_true', help='Regression mode [Default: %default]')
    parser.add_option('-s', dest='seq_only', default=False, action='store_true', help='Use sequence features only; no additional features [Default: %default]')
    parser.add_option('--sample', dest='sample', default=None, type='int', help='Sample from the training set [Default: %default]')
    parser.add_option('-t', dest='target_hdf5', default=None, help='Extract targets from this HDF5 rather than data_hdf5 argument')
    parser.add_option('-x', dest='regex_add', default=None, help='Filter additional features using a comma-separated list of regular expressions')
    (options, args) = parser.parse_args()

    if len(args) != 3:
        parser.error('Must provide full data HDF5, representation HDF5, and target index or filename')
    else:
        repr_hdf5_file = args[0]
        data_hdf5_file = args[1]
        target_i = args[2]

    if not os.path.isdir(options.out_dir):
        os.mkdir(options.out_dir)

    random.seed(1)

    #######################################################
    # preprocessing
    #######################################################
    # load training targets
    data_hdf5_in = h5py.File(data_hdf5_file, 'r')
    if options.target_hdf5:
        target_hdf5_in = h5py.File(options.target_hdf5, 'r')
    else:
        target_hdf5_in = data_hdf5_in
    train_y = np.array(target_hdf5_in['train_out'])[:, target_i]
    test_y = np.array(target_hdf5_in['test_out'])[:, target_i]

    # load training representations
    if not options.add_only:
        repr_hdf5_in = h5py.File(repr_hdf5_file, 'r')
        train_x = np.array(repr_hdf5_in['train_repr'])
        test_x = np.array(repr_hdf5_in['test_repr'])
        repr_hdf5_in.close()

    if options.seq_only:
        add_labels = []
    else:
        # load additional features
        train_a = np.array(data_hdf5_in['train_add'])
        test_a = np.array(data_hdf5_in['test_add'])
        add_labels = np.array(data_hdf5_in['add_labels'])

        if options.regex_add:
            fi = filter_regex(options.regex_add, add_labels)
            train_a, test_a, add_labels = train_a[:, fi], test_a[:, fi], add_labels[fi]

        # append additional features
        if options.add_only:
            add_i = 0
            train_x, test_x = train_a, test_a
        else:
            add_i = train_x.shape[1]
            train_x = np.concatenate((train_x, train_a), axis=1)
            test_x = np.concatenate((test_x, test_a), axis=1)

    data_hdf5_in.close()
    if options.target_hdf5:
        target_hdf5_in.close()

    # balance
    if options.balance:
        train_x, train_y = balance(train_x, train_y)

    # sample
    if options.sample is not None and options.sample < train_x.shape[0]:
        sample_indexes = random.sample(range(train_x.shape[0]), options.sample)
        train_x = train_x[sample_indexes]
        train_y = train_y[sample_indexes]

    #######################################################
    # model
    #######################################################
    if options.regression:
        # fit
        model = BayesianRidge(fit_intercept=True)
        model.fit(train_x, train_y)

        # accuracy
        acc_out = open('%s/r2.txt' % options.out_dir, 'w')
        print(model.score(test_x, test_y), file=acc_out)
        acc_out.close()

        test_preds = model.predict(test_x)

        # plot a sample of predictions versus actual
        plt.figure()
        sns.jointplot(test_preds[:5000], test_y[:5000], joint_kws={'alpha': 0.3})
#......... (part of the code omitted here) .........
Example 15: do_validation

def do_validation(data_path, steps=10):
    allfiles = initialize(data_path)
    gbm = GradientBoostingRegressor(n_estimators=100, learning_rate=0.05, max_depth=6, min_samples_leaf=5, subsample=0.5)
    ada = AdaBoostRegressor(n_estimators=200, learning_rate=1)
    etree = ExtraTreesRegressor(n_estimators=200, n_jobs=-1, min_samples_leaf=5)
    rf = RandomForestRegressor(n_estimators=200, max_features=4, min_samples_leaf=5)
    kn = KNeighborsRegressor(n_neighbors=25)
    logit = LogisticRegression(tol=0.05)
    enet = ElasticNetCV(l1_ratio=0.75, max_iter=1000, tol=0.05)
    svr = SVR(kernel="linear")
    ridge = Ridge(alpha=18)
    bridge = BayesianRidge(n_iter=500)

    gbm_metrics = 0.0
    ada_metrics = 0.0
    etree_metrics = 0.0
    rf_metrics = 0.0
    kn_metrics = 0.0
    logit_metrics = 0.0
    svr_metrics = 0.0
    ridge_metrics = 0.0
    bridge_metrics = 0.0
    enet_metrics = 0.0
    nnet_metrics = 0.0

    logistic = LogisticRegression()
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])

    for i in range(steps):
        driver = allfiles[i]
        df, Y = create_merged_dataset(driver)
        df['label'] = Y
        # Shuffle DF.
        df = df.reindex(np.random.permutation(df.index))

        train = df[:100]
        label = train['label']
        del train['label']

        test = df[100:400]
        Y = test['label']
        del test['label']

        #to_drop = ['driver', 'trip', 'speed1', 'speed2', 'speed3', 'speed4', 'speed5', 'speed6', 'speed7', 'speed8', 'speed9',
        #           'speed10', 'speed11', 'speed12', 'speed13', 'speed14', 'speed15', 'speed16', 'speed17', 'speed18', 'speed19',
        #           'speed20', 'speed21', 'speed22', 'speed23', 'speed24', 'speed25', 'speed26', 'speed27', 'speed28', 'speed29',
        #           'speed30', 'speed31', 'speed32', 'speed33', 'speed34', 'speed35', 'speed36', 'speed37', 'speed38', 'speed39',
        #           'speed40', 'speed41', 'speed42', 'speed43', 'speed44', 'speed45', 'speed46', 'speed47', 'speed48', 'speed49',
        #           'speed50', 'speed51', 'speed52', 'speed53', 'speed54', 'speed55', 'speed56', 'speed57', 'speed58', 'speed59',
        #           'speed60', 'speed61', 'speed62', 'speed63', 'speed64', 'speed65', 'speed66', 'speed67', 'speed68', 'speed69',
        #           'speed70', 'speed71', 'speed72', 'speed73', 'speed74', 'speed75', 'speed76', 'speed77', 'speed78', 'speed79', 'speed80']
        to_drop = ['driver', 'trip']

        X_train = train.drop(to_drop, 1)
        X_test = test.drop(to_drop, 1)

        gbm.fit(X_train, label)
        Y_hat = gbm.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        gbm_metrics += metrics.auc(fpr, tpr)

        ada.fit(X_train, label)
        Y_hat = ada.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        ada_metrics += metrics.auc(fpr, tpr)

        etree.fit(X_train, label)
        Y_hat = etree.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        etree_metrics += metrics.auc(fpr, tpr)

        rf.fit(X_train, label)
        Y_hat = rf.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        rf_metrics += metrics.auc(fpr, tpr)

        kn.fit(X_train, label)
        Y_hat = kn.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        kn_metrics += metrics.auc(fpr, tpr)

        # Linear models.
        to_drop = ['driver', 'trip', 'distance', 'sd_acceleration', 'final_angle', 'mean_acceleration', 'mean_avg_speed', 'sd_inst_speed',
                   'sd_avg_speed', 'mean_inst_speed', 'points']

        X_train = train.drop(to_drop, 1)
        X_test = test.drop(to_drop, 1)

        logit.fit(X_train, label)
        Y_hat = [i[1] for i in logit.predict_proba(X_test)]
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        logit_metrics += metrics.auc(fpr, tpr)

        svr.fit(X_train, label)
        Y_hat = svr.predict(X_test)
        fpr, tpr, thresholds = metrics.roc_curve(Y, Y_hat)
        svr_metrics += metrics.auc(fpr, tpr)

        ridge.fit(X_train, label)
#......... (part of the code omitted here) .........