This article collects typical usage examples of the Python method sklearn.linear_model.BayesianRidge.score. If you have been wondering what exactly BayesianRidge.score does and how to use it, the curated code examples below may help. You can also read more about the class it belongs to, sklearn.linear_model.BayesianRidge.
Six code examples of BayesianRidge.score are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
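Before the scraped examples, here is a minimal, self-contained sketch of the fit/score pattern they all share. The synthetic data and variable names are illustrative, not taken from any example below; for any scikit-learn regressor, score returns the coefficient of determination R² on the given data.

import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import train_test_split

# synthetic regression data (illustrative only)
rng = np.random.RandomState(0)
X = rng.randn(200, 5)
y = X @ np.array([1.5, -2.0, 0.0, 0.7, 3.0]) + 0.1 * rng.randn(200)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = BayesianRidge(compute_score=True)  # also track the log marginal likelihood
model.fit(X_train, y_train)

# BayesianRidge.score returns R^2 of the predictions
print("train R^2:", model.score(X_train, y_train))
print("test R^2:", model.score(X_test, y_test))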
Example 1: int

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

# (reconstruction: the scrape truncates this call; it is presumably a
# train_test_split on X and y defined above the excerpt)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=test_size, random_state=0)

# k = int(0.5 * n_features)
# print("-----------------------------------------------")
# print("Perform chi2 feature selection k=", k)
# print("-----------------------------------------------")
# X_train, X_test = selectFeatures(X_train, X_test, y_train, k)

print("-----------------------------------------------")
print("BayesianRidge regression on training set")
print("-----------------------------------------------")

# unused here: class_weight is an svm.SVC parameter left over from
# the SVM variant of this script; BayesianRidge does not accept it
class_weight = {0: 5}
print("Class weight=", class_weight)

clf = BayesianRidge(compute_score=True).fit(X_train, y_train)
print("Test BayesianRidge score=", clf.score(X_test, y_test))
print("Train BayesianRidge score=", clf.score(X_train, y_train))

print("-----------------------------------------------")
print("Metrics on TEST SET")
print("-----------------------------------------------")
y_pred = clf.predict(X_test)
# note: y_pred is continuous; classification metrics need discrete labels
# (see the sketch after this example)
print(metrics.classification_report(y_test, y_pred, target_names=label_names))
print(metrics.confusion_matrix(y_test, y_pred))

print("-----------------------------------------------")
print("Metrics on TRAIN SET")
print("-----------------------------------------------")
y_predTrain = clf.predict(X_train)
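The example above pipes the regressor's continuous predictions into classification_report and confusion_matrix, which scikit-learn rejects for continuous input. A minimal hedged fix, assuming the targets are binary 0/1 labels; the 0.5 cutoff is illustrative, not from the original:

from sklearn import metrics

# threshold continuous outputs into class labels before classification metrics
y_pred_labels = (y_pred >= 0.5).astype(int)
print(metrics.classification_report(y_test, y_pred_labels, target_names=label_names))
print(metrics.confusion_matrix(y_test, y_pred_labels))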
Example 2: main

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

def main():
    usage = 'usage: %prog [options] <repr_hdf5> <data_hdf5> <target_index>'
    parser = OptionParser(usage)
    parser.add_option('-a', dest='add_only', default=False, action='store_true', help='Use additional features only; no sequence features')
    parser.add_option('-b', dest='balance', default=False, action='store_true', help='Downsample the negative set to balance [Default: %default]')
    parser.add_option('-o', dest='out_dir', default='postmodel', help='Output directory [Default: %default]')
    parser.add_option('-r', dest='regression', default=False, action='store_true', help='Regression mode [Default: %default]')
    parser.add_option('-s', dest='seq_only', default=False, action='store_true', help='Use sequence features only; no additional features [Default: %default]')
    parser.add_option('--sample', dest='sample', default=None, type='int', help='Sample from the training set [Default: %default]')
    parser.add_option('-t', dest='target_hdf5', default=None, help='Extract targets from this HDF5 rather than data_hdf5 argument')
    parser.add_option('-x', dest='regex_add', default=None, help='Filter additional features using a comma-separated list of regular expressions')
    (options, args) = parser.parse_args()

    if len(args) != 3:
        parser.error('Must provide full data HDF5, representation HDF5, and target index or filename')
    else:
        repr_hdf5_file = args[0]
        data_hdf5_file = args[1]
        target_i = int(args[2])  # used as a column index below, so cast to int

    if not os.path.isdir(options.out_dir):
        os.mkdir(options.out_dir)

    random.seed(1)

    #######################################################
    # preprocessing
    #######################################################
    # load training targets
    data_hdf5_in = h5py.File(data_hdf5_file, 'r')
    if options.target_hdf5:
        target_hdf5_in = h5py.File(options.target_hdf5, 'r')
    else:
        target_hdf5_in = data_hdf5_in
    train_y = np.array(target_hdf5_in['train_out'])[:, target_i]
    test_y = np.array(target_hdf5_in['test_out'])[:, target_i]

    # load training representations
    if not options.add_only:
        repr_hdf5_in = h5py.File(repr_hdf5_file, 'r')
        train_x = np.array(repr_hdf5_in['train_repr'])
        test_x = np.array(repr_hdf5_in['test_repr'])
        repr_hdf5_in.close()

    if options.seq_only:
        add_labels = []
    else:
        # load additional features
        train_a = np.array(data_hdf5_in['train_add'])
        test_a = np.array(data_hdf5_in['test_add'])
        add_labels = np.array(data_hdf5_in['add_labels'])

        if options.regex_add:
            fi = filter_regex(options.regex_add, add_labels)
            train_a, test_a, add_labels = train_a[:, fi], test_a[:, fi], add_labels[fi]

        # append additional features
        if options.add_only:
            add_i = 0
            train_x, test_x = train_a, test_a
        else:
            add_i = train_x.shape[1]
            train_x = np.concatenate((train_x, train_a), axis=1)
            test_x = np.concatenate((test_x, test_a), axis=1)

    data_hdf5_in.close()
    if options.target_hdf5:
        target_hdf5_in.close()

    # balance
    if options.balance:
        train_x, train_y = balance(train_x, train_y)

    # sample
    if options.sample is not None and options.sample < train_x.shape[0]:
        sample_indexes = random.sample(range(train_x.shape[0]), options.sample)
        train_x = train_x[sample_indexes]
        train_y = train_y[sample_indexes]

    #######################################################
    # model
    #######################################################
    if options.regression:
        # fit
        model = BayesianRidge(fit_intercept=True)
        model.fit(train_x, train_y)

        # accuracy (R^2 on the test set)
        acc_out = open('%s/r2.txt' % options.out_dir, 'w')
        print(model.score(test_x, test_y), file=acc_out)  # Python 3 form of `print >> acc_out, ...`
        acc_out.close()

        test_preds = model.predict(test_x)

        # plot a sample of predictions versus actual
        plt.figure()
        sns.jointplot(x=test_preds[:5000], y=test_y[:5000], joint_kws={'alpha': 0.3})
#......... remainder of the code omitted .........
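The value this example writes to r2.txt is the regressor's default score, the coefficient of determination. A quick sanity check of that equivalence, using the names from the example above:

import numpy as np
from sklearn.metrics import r2_score

# a regressor's score(X, y) is defined as r2_score(y, model.predict(X))
assert np.isclose(model.score(test_x, test_y),
                  r2_score(test_y, model.predict(test_x)))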
Example 3: main

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

def main():
    usage = "usage: %prog [options] <model_file>"
    parser = OptionParser(usage)
    parser.add_option(
        "-c",
        dest="center_dist",
        default=10,
        type="int",
        help="Distance between the motifs and sequence center [Default: %default]",
    )
    parser.add_option(
        "-d", dest="model_hdf5_file", default=None, help="Pre-computed model output as HDF5 [Default: %default]"
    )
    parser.add_option(
        "-g", dest="cuda", default=False, action="store_true", help="Run on the GPGPU [Default: %default]"
    )
    parser.add_option("-l", dest="seq_length", default=600, type="int", help="Sequence length [Default: %default]")
    parser.add_option("-o", dest="out_dir", default="heat", help="Output directory [Default: %default]")
    parser.add_option(
        "-t",
        dest="targets",
        default="0",
        help="Comma-separated list of target indexes to plot (or -1 for all) [Default: %default]",
    )
    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.error("Must provide Basset model file")
    else:
        model_file = args[0]

    out_targets = [int(ti) for ti in options.targets.split(",")]

    if not os.path.isdir(options.out_dir):
        os.mkdir(options.out_dir)

    random.seed(1)

    # torch options
    cuda_str = ""
    if options.cuda:
        cuda_str = "-cuda"

    #################################################################
    # place filter consensus motifs
    #################################################################
    # determine filter consensus motifs
    filter_consensus = get_filter_consensus(model_file, options.out_dir, cuda_str)

    seqs_1hot = []
    # num_filters = len(filter_consensus)
    num_filters = 20
    filter_len = filter_consensus[0].shape[1]

    # position the motifs (integer division so the indexes stay ints in Python 3)
    left_i = options.seq_length // 2 - options.center_dist - filter_len
    right_i = options.seq_length // 2 + options.center_dist

    ns_1hot = np.zeros((4, options.seq_length)) + 0.25
    # ns_1hot = np.zeros((4,options.seq_length))
    # for i in range(options.seq_length):
    #     nt_i = random.randint(0,3)
    #     ns_1hot[nt_i,i] = 1

    for i in range(num_filters):
        for j in range(num_filters):
            # copy the sequence of N's
            motifs_seq = np.copy(ns_1hot)

            # write them into the one hot coding
            motifs_seq[:, left_i : left_i + filter_len] = filter_consensus[i]
            motifs_seq[:, right_i : right_i + filter_len] = filter_consensus[j]

            # save
            seqs_1hot.append(motifs_seq)

    # make a full array
    seqs_1hot = np.array(seqs_1hot)

    # reshape for spatial
    seqs_1hot = seqs_1hot.reshape((seqs_1hot.shape[0], 4, 1, options.seq_length))

    #################################################################
    # place filter consensus motifs
    #################################################################
    # save to HDF5
    seqs_file = "%s/motif_seqs.h5" % options.out_dir
    h5f = h5py.File(seqs_file, "w")
    h5f.create_dataset("test_in", data=seqs_1hot)
    h5f.close()

    # predict scores
    scores_file = "%s/motif_seqs_scores.h5" % options.out_dir
    torch_cmd = "th basset_place2_predict.lua %s %s %s %s" % (cuda_str, model_file, seqs_file, scores_file)
    subprocess.call(torch_cmd, shell=True)

    # load in scores
    hdf5_in = h5py.File(scores_file, "r")
    motif_seq_scores = np.array(hdf5_in["scores"])
    hdf5_in.close()
#......... remainder of the code omitted .........
Example 4: time

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

#                         random_state=0))
t1 = time()
sc.fit(X_train, y_train)
sc_time = time() - t1
computed_coefs = sc.inverse_transform()
computed_coefs = np.reshape(computed_coefs, [size, size, size])
score = sc.score(X_test, y_test)

###############################################################################
# Compute the results for simple BayesianRidge
t1 = time()
clf.fit(X_train, y_train)
bayes_time = time() - t1
bayes_coefs = clf.coef_
bayes_score = clf.score(X_test, y_test)
bayes_coefs = bayes_coefs.reshape((size, size, size))

###############################################################################
# Plot the results
pl.close('all')
pl.figure()
pl.title('Scores of the supervised clustering')
pl.subplot(2, 1, 1)
pl.plot(np.arange(len(sc.scores_)), sc.scores_)
pl.xlabel('iteration')
pl.ylabel('score')
pl.title('Score of the best parcellation of each iteration')
pl.subplot(2, 1, 2)
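The excerpt stops right after opening the second subplot; presumably it goes on to compare the two coefficient maps. A hedged sketch of one way that figure could be finished, using only names from the excerpt (the middle-slice choice is illustrative, not the author's code):

# compare a middle slice of the recovered coefficient volume
pl.imshow(computed_coefs[size // 2], interpolation='nearest')
pl.title('Supervised clustering (R^2 %.3f) vs. BayesianRidge (R^2 %.3f)'
         % (score, bayes_score))
pl.colorbar()
pl.show()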
Example 5: prediction_BayesianRidge

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

# assumes module-level imports (numpy as np, pandas as pd, DataFrame from
# pandas) and a cf_dict lookup table, all defined elsewhere in LinearRegression.py
def prediction_BayesianRidge(X_train, Y_train, X_test, Y_test, normalize):
    # Print shapes of the training and testing data sets
    #print("Shapes of the training and testing data sets")
    #print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)

    # Create our regression object
    # (the `normalize` parameter was removed in scikit-learn 1.2; on recent
    # versions, standardize the features beforehand instead, see the note below)
    lreg = BayesianRidge(normalize=normalize)

    # do a linear regression, but only on the training set
    lreg.fit(X_train, Y_train)
    #print("The estimated intercept coefficient is %.2f " % lreg.intercept_)
    #print("The number of coefficients used was %d " % len(lreg.coef_))

    # Set a DataFrame from the Facts
    coeff_df = DataFrame(X_train.columns)
    coeff_df.columns = ["Fact"]

    # Set a new column lining up the coefficients from the linear regression
    coeff_df["Coefficient"] = pd.Series(lreg.coef_)
    # Show
    #coeff_df

    # highest correlation between a fact and fraction votes
    #print("Highest correlation fact: %s is %.9f" % (cf_dict.loc[coeff_df.iloc[coeff_df["Coefficient"].idxmax()]["Fact"],"description"], coeff_df.iloc[coeff_df["Coefficient"].idxmax()]["Coefficient"]))
    #sns_plot = sns.jointplot(coeff_df.iloc[coeff_df["Coefficient"].idxmax()]["Fact"], "Fraction Votes", pd.merge(X_test, pd.DataFrame(Y_test), right_index=True, left_index=True), kind="scatter")

    # Predictions on training and testing sets
    pred_train = lreg.predict(X_train)
    pred_test = lreg.predict(X_test)

    # The mean square error
    #print("MSE with X_train and Y_train: %.6f" % np.mean((Y_train - pred_train) ** 2))
    #print("MSE with X_test and Y_test: %.6f" % np.mean((Y_test - pred_test) ** 2))

    # Explained variance score: 1 is perfect prediction
    #print("Variance score: %.2f" % lreg.score(X_test, Y_test))

    result = {}
    result["method"] = "BayesianRidge"
    result["normalize"] = "Y" if normalize else "N"
    result["X_train_shape"] = X_train.shape
    result["Y_train_shape"] = Y_train.shape
    result["X_test_shape"] = X_test.shape
    result["Y_test_shape"] = Y_test.shape
    result["intercept"] = lreg.intercept_
    result["num_coef"] = len(lreg.coef_)
    result["max_fact"] = cf_dict.loc[coeff_df.iloc[coeff_df["Coefficient"].idxmax()]["Fact"], "description"]
    result["max_fact_value"] = coeff_df.iloc[coeff_df["Coefficient"].idxmax()]["Coefficient"]
    result["MSE_train"] = np.mean((Y_train - pred_train) ** 2)
    result["MSE_test"] = np.mean((Y_test - pred_test) ** 2)
    result["variance"] = lreg.score(X_test, Y_test)

    return pred_test, coeff_df, pred_train, result

Author: KaterynaD, Project: 2016-US-President-Election-Primary-Results-Analysis, Lines: 66, Source: LinearRegression.py
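A portability note on the normalize argument above: it was deprecated in scikit-learn 1.0 and removed in 1.2. A close replacement on current versions is to scale inside a pipeline; this is a sketch, not a drop-in from the repository, and note that StandardScaler divides by the standard deviation whereas the old flag divided by the column l2-norm, so results can differ slightly:

from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# approximate modern equivalent of BayesianRidge(normalize=True)
lreg = make_pipeline(StandardScaler(), BayesianRidge())
lreg.fit(X_train, Y_train)
print("R^2:", lreg.score(X_test, Y_test))  # score delegates to the final estimator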
Example 6: main

# Required import: from sklearn.linear_model import BayesianRidge [as alias]
# or: from sklearn.linear_model.BayesianRidge import score [as alias]

def main():
    usage = 'usage: %prog [options] <model_file>'
    parser = OptionParser(usage)
    parser.add_option('-c', dest='center_dist', default=10, type='int', help='Distance between the motifs and sequence center [Default: %default]')
    parser.add_option('-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5 [Default: %default]')
    parser.add_option('-g', dest='cuda', default=False, action='store_true', help='Run on the GPGPU [Default: %default]')
    parser.add_option('-l', dest='seq_length', default=600, type='int', help='Sequence length [Default: %default]')
    parser.add_option('-o', dest='out_dir', default='heat', help='Output directory [Default: %default]')
    parser.add_option('-t', dest='targets', default='0', help='Comma-separated list of target indexes to plot (or -1 for all) [Default: %default]')
    (options, args) = parser.parse_args()

    if len(args) != 1:
        parser.error('Must provide Basset model file')
    else:
        model_file = args[0]

    out_targets = [int(ti) for ti in options.targets.split(',')]

    if not os.path.isdir(options.out_dir):
        os.mkdir(options.out_dir)

    random.seed(1)

    # torch options
    cuda_str = ''
    if options.cuda:
        cuda_str = '-cuda'

    #################################################################
    # place filter consensus motifs
    #################################################################
    # determine filter consensus motifs
    filter_consensus = get_filter_consensus(model_file, options.out_dir, cuda_str)

    seqs_1hot = []
    num_filters = len(filter_consensus)
    # num_filters = 40
    filter_len = filter_consensus[0].shape[1]

    # position the motifs (integer division so the indexes stay ints in Python 3)
    left_i = options.seq_length // 2 - options.center_dist - filter_len
    right_i = options.seq_length // 2 + options.center_dist

    ns_1hot = np.zeros((4, options.seq_length)) + 0.25
    # ns_1hot = np.zeros((4,options.seq_length))
    # for i in range(options.seq_length):
    #     nt_i = random.randint(0,3)
    #     ns_1hot[nt_i,i] = 1

    for i in range(num_filters):
        for j in range(num_filters):
            # copy the sequence of N's
            motifs_seq = np.copy(ns_1hot)

            # write them into the one hot coding
            motifs_seq[:, left_i:left_i+filter_len] = filter_consensus[i]
            motifs_seq[:, right_i:right_i+filter_len] = filter_consensus[j]

            # save
            seqs_1hot.append(motifs_seq)

    # make a full array
    seqs_1hot = np.array(seqs_1hot)

    # reshape for spatial
    seqs_1hot = seqs_1hot.reshape((seqs_1hot.shape[0], 4, 1, options.seq_length))

    #################################################################
    # place filter consensus motifs
    #################################################################
    # save to HDF5
    seqs_file = '%s/motif_seqs.h5' % options.out_dir
    h5f = h5py.File(seqs_file, 'w')
    h5f.create_dataset('test_in', data=seqs_1hot)
    h5f.close()

    # predict scores
    scores_file = '%s/motif_seqs_scores.h5' % options.out_dir
    torch_cmd = 'th basset_place2_predict.lua %s %s %s %s' % (cuda_str, model_file, seqs_file, scores_file)
    subprocess.call(torch_cmd, shell=True)

    # load in scores
    hdf5_in = h5py.File(scores_file, 'r')
    motif_seq_scores = np.array(hdf5_in['scores'])
    hdf5_in.close()

    #################################################################
    # analyze
    #################################################################
    for ti in out_targets:
        #################################################################
        # compute pairwise expectations
        #################################################################
        # X = np.zeros((motif_seq_scores.shape[0], num_filters))
        # xi = 0
        # for i in range(num_filters):
        #     for j in range(num_filters):
        #         X[xi,i] += 1
        #         X[xi,j] += 1
#......... remainder of the code omitted .........
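The commented-out block at the end hints at what the omitted analysis presumably does, and at why this script appears under BayesianRidge.score at all: each generated sequence contains exactly two placed motifs, so a two-hot indicator matrix plus a linear model gives the additive expectation of each score, and deviations from it expose pairwise motif interactions. The completed loop and the fit below are a reconstruction under that assumption, not the author's verbatim code:

# build the two-hot design matrix sketched in the comments above
X = np.zeros((motif_seq_scores.shape[0], num_filters))
xi = 0
for i in range(num_filters):
    for j in range(num_filters):
        X[xi, i] += 1
        X[xi, j] += 1
        xi += 1

# fit an additive model of the observed scores for target ti;
# model.score reports how much of the signal is explained additively
model = BayesianRidge()
model.fit(X, motif_seq_scores[:, ti])
print('additive R^2:', model.score(X, motif_seq_scores[:, ti]))

# residuals above/below the additive expectation flag motif-pair interactions
residuals = (motif_seq_scores[:, ti] - model.predict(X)).reshape((num_filters, num_filters))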