This page collects typical usage examples of the Python method sklearn.dummy.DummyClassifier.predict. If you have been wondering what DummyClassifier.predict does, or how to use it in practice, the hand-picked examples below should help. You can also read more about the containing class, sklearn.dummy.DummyClassifier.
Fifteen code examples of DummyClassifier.predict are shown below, ordered by popularity by default.
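Before the project-level examples, here is a minimal, self-contained sketch of what fit/predict does on a DummyClassifier; the data is made up purely for illustration:

import numpy as np
from sklearn.dummy import DummyClassifier

# Toy data: DummyClassifier ignores the features entirely; only y matters.
X = np.zeros((6, 2))
y = np.array([0, 0, 0, 1, 1, 0])

clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)

# Every prediction is the majority class of y, regardless of the input rows.
print(clf.predict(np.ones((3, 2))))   # -> [0 0 0]
print(clf.score(X, y))                # accuracy of the majority baseline: 4/6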
Example 1: compare_dummy_classification
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def compare_dummy_classification(self):
    """Compare the classifier to dummy classifiers. Return results as (result scores tuple, N.A., N.A.)."""
    X_train = self.train_vectors
    y_train = self.train_tweetclasses
    X_test = self.test_vectors
    y_test = self.test_tweetclasses

    dummy_results = []

    # Baseline 1: always predict the most frequent class.
    dummy = DummyClassifier(strategy="most_frequent", random_state=0)
    dummy.fit(X_train, y_train)
    y_true, y_preddum = y_test, dummy.predict(X_test)
    tuples = precision_recall_fscore_support(y_true, y_preddum)

    # Baseline 2: predict randomly, respecting the training class distribution.
    dummy1 = DummyClassifier(strategy="stratified", random_state=0)
    dummy1.fit(X_train, y_train)
    y_true, y_preddum1 = y_test, dummy1.predict(X_test)
    tuples1 = precision_recall_fscore_support(y_true, y_preddum1)

    # Baseline 3: predict classes uniformly at random.
    dummy2 = DummyClassifier(strategy="uniform", random_state=0)
    dummy2.fit(X_train, y_train)
    y_true, y_preddum2 = y_test, dummy2.predict(X_test)
    tuples2 = precision_recall_fscore_support(y_true, y_preddum2)

    resulttuple = ("dummy freq", "N.A.", "N.A.", "N.A.", "N.A.", tuples)
    resulttuple1 = ("dummy strat", "N.A.", "N.A.", "N.A.", "N.A.", tuples1)
    resulttuple2 = ("dummy uni", "N.A.", "N.A.", "N.A.", "N.A.", tuples2)

    dummy_results.append(resulttuple)
    dummy_results.append(resulttuple1)
    dummy_results.append(resulttuple2)

    return dummy_results
Example 2: eval_against_dumm
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def eval_against_dumm(FS, aut_target, myclf, folder):
    """Evaluate the real classifier against three dummy baselines over the CV folds in `folder`."""
    real_acc = []
    dummy1_acc, dummy2_acc, dummy3_acc = [], [], []
    for train_index, test_index in folder:
        # Fresh copy of the real classifier for each fold: clf is reused for
        # the dummy baselines below, so it must be re-copied here.
        clf = copy.deepcopy(myclf)
        clf.fit(FS[train_index, :], aut_target[train_index])
        labels = np.asarray(clf.predict(FS[test_index, :]))
        acc = np.mean(aut_target[test_index] == labels)
        real_acc.append(acc)

        # Stratified dummy baseline.
        clf = DummyClassifier(strategy="stratified")
        clf.fit(FS[train_index, :], aut_target[train_index])
        labels = np.asarray(clf.predict(FS[test_index, :]))
        acc = np.mean(aut_target[test_index] == labels)
        dummy1_acc.append(acc)

        # Most-frequent dummy baseline.
        clf = DummyClassifier(strategy="most_frequent")
        clf.fit(FS[train_index, :], aut_target[train_index])
        labels = np.asarray(clf.predict(FS[test_index, :]))
        acc = np.mean(aut_target[test_index] == labels)
        dummy2_acc.append(acc)

        # Uniform dummy baseline.
        clf = DummyClassifier(strategy="uniform")
        clf.fit(FS[train_index, :], aut_target[train_index])
        labels = np.asarray(clf.predict(FS[test_index, :]))
        acc = np.mean(aut_target[test_index] == labels)
        dummy3_acc.append(acc)

    return np.mean(real_acc), np.mean(dummy1_acc), np.mean(dummy2_acc), \
        np.mean(dummy3_acc)
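The `folder` argument is simply an iterable of (train_index, test_index) pairs. A hypothetical call with a modern scikit-learn splitter might look like the sketch below; the data and classifier are illustrative only, and it assumes the function above (with its imports: copy, numpy as np, DummyClassifier) is in scope:

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
FS = rng.rand(100, 5)                      # illustrative feature matrix
aut_target = rng.randint(0, 2, size=100)   # illustrative binary labels

folder = StratifiedKFold(n_splits=5).split(FS, aut_target)
real, d_strat, d_freq, d_uni = eval_against_dumm(
    FS, aut_target, LogisticRegression(), folder)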
Example 3: compare_dummy
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def compare_dummy(self):
    """Compare the classifier to dummy classifiers."""
    # print "\nDetailed classification report:\n"
    # print "The model is trained on the full development set.\n"
    # print "The scores are computed on the full evaluation set.\n"
    X_train = self.train_vectors
    y_train = self.train_tweetclasses
    X_test = self.test_vectors
    y_test = self.test_tweetclasses

    dummy = DummyClassifier(strategy='most_frequent', random_state=0)
    dummy.fit(X_train, y_train)
    y_true, y_preddum = y_test, dummy.predict(X_test)
    tuples = precision_recall_fscore_support(y_true, y_preddum)

    dummy1 = DummyClassifier(strategy='stratified', random_state=0)
    dummy1.fit(X_train, y_train)
    y_true, y_preddum1 = y_test, dummy1.predict(X_test)
    tuples1 = precision_recall_fscore_support(y_true, y_preddum1)

    dummy2 = DummyClassifier(strategy='uniform', random_state=0)
    dummy2.fit(X_train, y_train)
    y_true, y_preddum2 = y_test, dummy2.predict(X_test)
    tuples2 = precision_recall_fscore_support(y_true, y_preddum2)

    # One precision/recall/F1/support tuple per dummy strategy.
    return (tuples, tuples1, tuples2)
Example 4: test_classifier_prediction_independent_of_X
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_classifier_prediction_independent_of_X(strategy):
    y = [0, 2, 1, 1]
    X1 = [[0]] * 4
    clf1 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
    clf1.fit(X1, y)
    predictions1 = clf1.predict(X1)

    X2 = [[1]] * 4
    clf2 = DummyClassifier(strategy=strategy, random_state=0, constant=0)
    clf2.fit(X2, y)
    predictions2 = clf2.predict(X2)

    # Predictions depend only on y, so identical y with different X must give
    # identical output.
    assert_array_equal(predictions1, predictions2)
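In scikit-learn's test suite this function receives `strategy` from a pytest parametrization; the decorator is not part of the snippet above, so the following is only a sketch of what it plausibly looks like (the exact strategy list is an assumption):

import pytest

@pytest.mark.parametrize(
    "strategy",
    ["stratified", "most_frequent", "prior", "uniform", "constant"],  # assumed list
)
def test_classifier_prediction_independent_of_X(strategy):
    ...  # body as in Example 4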
Example 5: test_most_frequent_and_prior_strategy_with_2d_column_y
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_most_frequent_and_prior_strategy_with_2d_column_y():
    # Non-regression test added in
    # https://github.com/scikit-learn/scikit-learn/pull/13545
    X = [[0], [0], [0], [0]]
    y_1d = [1, 2, 1, 1]
    y_2d = [[1], [2], [1], [1]]

    for strategy in ("most_frequent", "prior"):
        clf_1d = DummyClassifier(strategy=strategy, random_state=0)
        clf_2d = DummyClassifier(strategy=strategy, random_state=0)

        clf_1d.fit(X, y_1d)
        clf_2d.fit(X, y_2d)
        # A column-vector y must give the same predictions as its 1-D equivalent.
        assert_array_equal(clf_1d.predict(X), clf_2d.predict(X))
Example 6: find_best_dummy_classification
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def find_best_dummy_classification(X, y, test_size=0.3, random_state=0, thresh=0.5, target_names=None, n=1):
    """Try all dummy models and return their scores, sorted by Matthews correlation."""
    X = X.reshape((len(X), -1))
    # y = y.reshape((len(y), -1))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state)

    dummy_scores = []
    for i in range(n):
        # Dummy classifiers.
        for strategy in ['most_frequent', 'uniform', 'prior', 'stratified']:
            clf = DummyClassifier(strategy=strategy)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            score = clf.score(X_test, y_test)
            matthews_corrcoef = sklearn.metrics.matthews_corrcoef(y_test > thresh, y_pred > thresh)
            # parse_classification_report is a project-specific helper, not part of scikit-learn.
            report = parse_classification_report(
                sklearn.metrics.classification_report(y_test > thresh, y_pred > thresh, target_names=target_names))
            dummy_scores.append(
                collections.OrderedDict(
                    strategy='classifier_' + strategy,
                    matthews_corrcoef=matthews_corrcoef,
                    score=score,
                    report=report,
                )
            )

        # Dummy regressors, thresholded into class labels.
        for strategy in ['mean', 'median']:
            clf = DummyRegressor(strategy=strategy)
            clf.fit(X_train, y_train)
            y_pred = clf.predict(X_test)
            score = clf.score(X_test, y_test)
            matthews_corrcoef = sklearn.metrics.matthews_corrcoef(y_test > thresh, y_pred > thresh)
            report = parse_classification_report(
                sklearn.metrics.classification_report(y_test > thresh, y_pred > thresh, target_names=target_names))
            dummy_scores.append(
                collections.OrderedDict(
                    strategy='regressor_' + strategy,
                    matthews_corrcoef=matthews_corrcoef,
                    score=score,
                    report=report,
                )
            )

    df = pd.DataFrame(dummy_scores)
    df = df.sort_values('matthews_corrcoef', ascending=False)
    return df, df[:1].iloc[0].to_dict()
Example 7: test_constant_strategy
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_constant_strategy():
    X = [[0], [0], [0], [0]]  # ignored
    y = [2, 1, 2, 2]
    clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), np.ones(len(X)))
    _check_predict_proba(clf, X, y)

    # The constant strategy also works with string labels.
    X = [[0], [0], [0], [0]]  # ignored
    y = ['two', 'one', 'two', 'two']
    clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
    clf.fit(X, y)
    assert_array_equal(clf.predict(X), np.array(['one'] * 4))
    _check_predict_proba(clf, X, y)
Example 8: get_scores
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def get_scores(X, y):
    nfolds = 40
    # NOTE: written against the pre-0.18 scikit-learn API
    # (StratifiedShuffleSplit(y, n_iter=...), class_weight='auto', 'mean_absolute_error' scoring).
    cv = StratifiedShuffleSplit(y, n_iter=nfolds, test_size=.05)
    dumb = DummyClassifier(strategy='most_frequent')
    clf = svm.SVC(class_weight='auto')
    param_dist = {"C": [.1, 1, 10],
                  "kernel": ['rbf', 'linear', 'poly']}
    search = GridSearchCV(clf, param_grid=param_dist,
                          scoring='mean_absolute_error')

    stest, strain, sdummy = [], [], []
    for nfeats in range(X.shape[1]):
        test_scores, train_scores, dummy_scores = [], [], []
        # Figure out our possible feature combinations.
        feats = itertools.combinations(range(X.shape[1]), nfeats + 1)
        for my_feats in feats:
            for oidx, (train, test) in enumerate(cv):
                idx = np.array(my_feats)
                y_train, y_test = y[train], y[test]
                X_train, X_test = X[train, :], X[test, :]
                search.fit(X_train, y_train)
                clf = search.best_estimator_
                clf.fit(X_train[:, idx], y_train)
                train_scores.append(accuracy_score(clf.predict(X_train[:, idx]), y_train))
                test_scores.append(accuracy_score(clf.predict(X_test[:, idx]), y_test))
                dumb.fit(X_train[:, idx], y_train)
                dummy_scores.append(accuracy_score(dumb.predict(X_test[:, idx]), y_test))
        sdummy.append(np.mean(dummy_scores))
        strain.append(np.mean(train_scores))
        stest.append(np.mean(test_scores))
    return stest, strain, sdummy
Example 9: do_cross_validation
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def do_cross_validation(labels):
    """Perform the k-fold cross validation.

    Perform the k-fold cross validation, collect the results and return the
    single test-instance predictions, as well as the classification results
    for each single fold and for the combination of all folds.

    Keyword arguments:
    labels -- all labels
    """
    skf = StratifiedKFold(labels, NO_OF_FOLDS)
    single_predictions = []  # Store each single classification decision.

    # Store classification results for each fold and for the entire task
    # (i.e., the entire cross validation).
    classification_result = np.zeros((NO_OF_FOLDS + 1, 5))

    for cur_fold, (train_idx, test_idx) in enumerate(skf):
        model = DummyClassifier(strategy='most_frequent')
        # DummyClassifier ignores the features, so placeholders are enough here.
        model.fit(None, labels[train_idx])
        pred_labels = model.predict(np.zeros(labels[test_idx].shape[0]))

        fold_array = np.empty(test_idx.shape[0])
        fold_array.fill(cur_fold)
        single_predictions.append(np.transpose(np.vstack((fold_array, test_idx,
                                                          labels[test_idx], pred_labels))))
        classification_result[cur_fold, :] = get_classification_result(
            cur_fold, labels[test_idx], pred_labels)

    single_predictions = np.vstack(single_predictions)
    return single_predictions, classification_result
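Example 9 uses the pre-0.18 cross-validation style, where StratifiedKFold(labels, NO_OF_FOLDS) is itself iterable. With current scikit-learn the equivalent loop would look roughly like the sketch below, keeping the example's variable names:

import numpy as np
from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=NO_OF_FOLDS)
# .split() needs an X argument even though DummyClassifier ignores it.
for cur_fold, (train_idx, test_idx) in enumerate(
        skf.split(np.zeros(len(labels)), labels)):
    ...  # per-fold work as in Example 9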
Example 10: get_scores
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def get_scores(X, y):
    nfolds = 200
    # NOTE: pre-0.18 scikit-learn API, as in Example 8.
    cv = StratifiedShuffleSplit(y, n_iter=nfolds, test_size=0.2)
    dumb = DummyClassifier(strategy="most_frequent")

    # The SVC and its parameter grid are immediately overridden below;
    # only the logistic-regression setup is actually used.
    clf = svm.SVC(class_weight="auto")
    clf = linear_model.LogisticRegression()
    param_dist = {"C": [0.1, 1, 10], "kernel": ["rbf", "linear", "poly"]}
    param_dist = {"C": [1e6, 1e5, 1e4, 1e3, 1e2, 10, 1, 0.1, 0.01, 0.001]}
    search = GridSearchCV(clf, param_grid=param_dist, scoring="mean_absolute_error")

    test_scores, train_scores, dummy_scores = [], [], []
    preds, true_labels = [], []
    for oidx, (train, test) in enumerate(cv):
        y_train, y_test = y[train], y[test]
        X_train, X_test = X[train, :], X[test, :]
        search.fit(X_train, y_train)
        clf = search.best_estimator_
        print(search.best_params_)
        clf.fit(X_train, y_train)
        train_scores.append(accuracy_score(clf.predict(X_train), y_train))
        test_scores.append(accuracy_score(clf.predict(X_test), y_test))
        dumb.fit(X_train, y_train)
        dummy_scores.append(accuracy_score(dumb.predict(X_test), y_test))
        preds += list(clf.predict(X_test))
        true_labels += list(y_test)
    return test_scores, train_scores, dummy_scores, preds, true_labels
Example 11: test_most_frequent_strategy
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_most_frequent_strategy():
    X = [[0], [0], [0], [0]]  # ignored
    y = [1, 2, 1, 1]

    clf = DummyClassifier(strategy="most_frequent", random_state=0)
    clf.fit(X, y)
    # The most frequent label in y is 1, so every prediction is 1.
    assert_array_equal(clf.predict(X), np.ones(len(X)))
    _check_predict_proba(clf, X, y)
Example 12: test_dummy_classifier_on_nan_value
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_dummy_classifier_on_nan_value():
    X = [[np.NaN]]
    y = [1]
    y_expected = [1]
    clf = DummyClassifier()
    clf.fit(X, y)
    # NaN features are fine because DummyClassifier never looks at X.
    y_pred = clf.predict(X)
    assert_array_equal(y_pred, y_expected)
Example 13: main
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def main(args):
    X, y, names = loadData(args.mat)

    # Pairwise Pearson correlations between features.
    t = numpy.transpose(X)
    ls = []
    for name, col in zip(names, t):
        ls.append((name, col))
    for col1, col2 in itertools.combinations(ls, 2):
        name1, row1 = col1
        name2, row2 = col2
        c, p = scipy.stats.pearsonr(row1, row2)
        print('debugCor', name1, name2, c, p)

    # Fit a decision tree on the full data set and export it as a PDF.
    depth = 6
    clf = tree.DecisionTreeClassifier(max_depth=depth)
    clf = clf.fit(X, y)
    dot_data = StringIO()
    tree.export_graphviz(clf, feature_names=names, out_file=dot_data)
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf(args.plotFile)

    # Evaluate the tree on stratified shuffle splits.
    sss = StratifiedShuffleSplit(y, 5, test_size=0.1, random_state=442)
    for train_index, test_index in sss:
        clf = tree.DecisionTreeClassifier(max_depth=depth)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        clf = clf.fit(X_train, y_train)
        preds = clf.predict(X_test)
        metrics.confusion_matrix(y_test, preds)
        print(metrics.classification_report(y_test, clf.predict(X_test)))

    # Compare against a dummy classifier on the same splits.
    print('\ndummy\n')
    for train_index, test_index in sss:
        clf = DummyClassifier()
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        clf = clf.fit(X_train, y_train)
        preds = clf.predict(X_test)
        metrics.confusion_matrix(y_test, preds)
        print('dummy', metrics.classification_report(y_test, clf.predict(X_test)))
Example 14: test_constant_strategy_multioutput
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_constant_strategy_multioutput():
    X = [[0], [0], [0], [0]]  # ignored
    y = np.array([[2, 3], [1, 3], [2, 3], [2, 0]])
    n_samples = len(X)

    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(X, y)
    # Every prediction row equals the constant [1, 0].
    assert_array_equal(clf.predict(X),
                       np.hstack([np.ones((n_samples, 1)),
                                  np.zeros((n_samples, 1))]))
    _check_predict_proba(clf, X, y)
Example 15: test_constant_strategy_sparse_target
# Required import: from sklearn.dummy import DummyClassifier
# Method used: DummyClassifier.predict
def test_constant_strategy_sparse_target():
    X = [[0]] * 5  # ignored
    y = sp.csc_matrix(np.array([[0, 1], [4, 0], [1, 1], [1, 4], [1, 1]]))
    n_samples = len(X)

    clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # A sparse multilabel target yields a sparse prediction matrix.
    assert_true(sp.issparse(y_pred))
    assert_array_equal(y_pred.toarray(),
                       np.hstack([np.ones((n_samples, 1)),
                                  np.zeros((n_samples, 1))]))