本文整理匯總了Python中sklearn.discriminant_analysis.LinearDiscriminantAnalysis方法的典型用法代碼示例。如果您正苦於以下問題:Python discriminant_analysis.LinearDiscriminantAnalysis方法的具體用法?Python discriminant_analysis.LinearDiscriminantAnalysis怎麽用?Python discriminant_analysis.LinearDiscriminantAnalysis使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sklearn.discriminant_analysis
的用法示例。
在下文中一共展示了discriminant_analysis.LinearDiscriminantAnalysis方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _tested_estimators
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def _tested_estimators():
    """Yield ``(name, instance)`` pairs for every sklearn estimator to test.

    Bicluster and private (underscore-prefixed) estimators are skipped;
    meta-estimators requiring an inner estimator get a simple default one.
    """
    for name, Estimator in all_estimators():
        # Bicluster estimators follow a different API and are tested elsewhere.
        if issubclass(Estimator, BiclusterMixin):
            continue
        # Underscore-prefixed estimators are private.
        if name.startswith("_"):
            continue
        # FIXME _skip_test should be used here (if we could)
        required_parameters = getattr(Estimator, "_required_parameters", [])
        if not required_parameters:
            yield name, Estimator()
            continue
        if required_parameters in (["estimator"], ["base_estimator"]):
            # Meta-estimator: supply a plain regressor or classifier.
            if issubclass(Estimator, RegressorMixin):
                inner = Ridge()
            else:
                inner = LinearDiscriminantAnalysis()
            yield name, Estimator(inner)
        else:
            warnings.warn("Can't instantiate estimator {} which requires "
                          "parameters {}".format(name,
                                                 required_parameters),
                          SkipTestWarning)
示例2: test_lda_priors
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_lda_priors():
    """Validate handling of the ``priors`` parameter of LDA."""
    # A negative prior must raise at fit time.
    negative_priors = np.array([0.5, -0.5])
    lda = LinearDiscriminantAnalysis(priors=negative_priors)
    assert_raise_message(ValueError, "priors must be non-negative",
                         lda.fit, X, y)

    # Priors passed as a plain list should be accepted (smoke test).
    LinearDiscriminantAnalysis(priors=[0.5, 0.5]).fit(X, y)

    # Priors that do not sum to 1 warn and get renormalized.
    unnormalized = np.array([0.5, 0.6])
    expected_norm = np.array([0.45, 0.55])
    lda = LinearDiscriminantAnalysis(priors=unnormalized)
    assert_warns(UserWarning, lda.fit, X, y)
    assert_array_almost_equal(lda.priors_, expected_norm, 2)
示例3: test_lda_explained_variance_ratio
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_lda_explained_variance_ratio():
    """``explained_variance_ratio_`` sums to 1 and agrees across solvers.

    Checks both the eigen and the svd solver on the same random data, then
    verifies the two solvers produce the same ratios.
    """
    rng = np.random.RandomState(0)
    X = rng.normal(loc=0, scale=100, size=(40, 20))
    y = rng.randint(0, 3, size=(40,))

    fitted = {}
    for solver in ("eigen", "svd"):
        clf = LinearDiscriminantAnalysis(solver=solver)
        clf.fit(X, y)
        # Normalized eigenvalues must sum to one.
        assert_almost_equal(clf.explained_variance_ratio_.sum(), 1.0, 3)
        # Three classes -> two discriminant components.
        assert_equal(clf.explained_variance_ratio_.shape, (2,),
                     "Unexpected length for explained_variance_ratio_")
        fitted[solver] = clf

    assert_array_almost_equal(fitted["svd"].explained_variance_ratio_,
                              fitted["eigen"].explained_variance_ratio_)
示例4: test_lda_scaling
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_lda_scaling():
    """LDA should classify perfectly regardless of per-feature scaling."""
    n = 100
    rng = np.random.RandomState(1234)
    # Uniform clusters shifted far apart: absolutely no class overlap.
    cluster_neg = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
    cluster_pos = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
    # Give each feature a wildly different scale.
    x = np.vstack((cluster_neg, cluster_pos)) * [1, 100, 10000]
    y = [-1] * n + [1] * n
    for solver in ('svd', 'lsqr', 'eigen'):
        clf = LinearDiscriminantAnalysis(solver=solver)
        # Every solver must separate the data perfectly.
        assert_equal(clf.fit(x, y).score(x, y), 1.0,
                     'using covariance: %s' % solver)
示例5: test_11_lda
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_11_lda(self):
    """LDA + MaxAbsScaler pipeline: PMML server scoring matches sklearn (multi-class)."""
    print("\ntest 11 (LDA with preprocessing) [multi-class]\n")
    X, X_test, y, features, target, test_file = \
        self.data_utility.get_data_for_multi_class_classification()
    pipeline_obj = Pipeline([
        ("scaler", MaxAbsScaler()),
        ("model", LinearDiscriminantAnalysis()),
    ])
    pipeline_obj.fit(X, y)

    # Export the fitted pipeline to PMML and score it on the remote server.
    file_name = 'test11sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = \
        self.adapa_utility.score_in_zserver(model_name, test_file)

    # Server output must agree with local sklearn predictions.
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(
        self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(
        self.adapa_utility.compare_probability(probabilities, model_prob), True)
示例6: test_12_lda
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_12_lda(self):
    """LDA + StandardScaler pipeline: PMML server scoring matches sklearn (binary)."""
    print("\ntest 12 (LDA with preprocessing) [binary-class]\n")
    X, X_test, y, features, target, test_file = \
        self.data_utility.get_data_for_binary_classification()
    pipeline_obj = Pipeline([
        ("scaler", StandardScaler()),
        ("model", LinearDiscriminantAnalysis()),
    ])
    pipeline_obj.fit(X, y)

    # Export the fitted pipeline to PMML and score it on the remote server.
    file_name = 'test12sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, probabilities = \
        self.adapa_utility.score_in_zserver(model_name, test_file)

    # Server output must agree with local sklearn predictions.
    model_pred = pipeline_obj.predict(X_test)
    model_prob = pipeline_obj.predict_proba(X_test)
    self.assertEqual(
        self.adapa_utility.compare_predictions(predictions, model_pred), True)
    self.assertEqual(
        self.adapa_utility.compare_probability(probabilities, model_prob), True)
示例7: main
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def main():
    """Train a bagged-LDA classifier on a CSV dataset and print test accuracy."""
    training_set = []
    test_set = []
    split = 0.25
    loadDataset('../Dataset/LDAdata.csv', split, training_set, test_set)
    print('Train set: ' + repr(len(training_set)))
    print('Test set: ' + repr(len(test_set)))

    # The last column is the class label; everything before it is a feature.
    train_arr = np.array(training_set)
    n_features = train_arr.shape[1] - 1
    X = train_arr[:, :n_features]
    y = train_arr[:, n_features]

    clf = BaggingClassifier(LDA())
    clf.fit(X, y)

    test_arr = np.array(test_set)
    X_test = test_arr[:, :n_features]
    y_test = test_arr[:, n_features]

    accuracy = clf.score(X_test, y_test) * 100
    print("Accuracy %:", accuracy)
示例8: lda_selection
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def lda_selection(X, y, n_components):
    """
    Performs the Fisher's Linear Discrimination Analysis keeps the most discriminative features

    Keyword arguments:
    X -- The feature vectors
    y -- The target vector
    n_components -- Number of features to keep

    Returns the input data projected onto the ``n_components`` most
    discriminative directions found by the eigen solver.
    """
    if verbose:
        # Bug fix: this was a Python 2 print *statement*, a SyntaxError on
        # Python 3 (the rest of the file uses print() calls).
        print('\nPerforming Linear Discrimination Analysis ...')
    lda = LDA(n_components=n_components, solver='eigen')
    discriminative_attributes = lda.fit(X, y).transform(X)
    return discriminative_attributes
#Random Forest Classifier with an additional attribute coef_, in order to be usable by the Recursive Feature Elimination method
示例9: __init__
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def __init__(self, **kwargs):
    """Initializes a ShrinkingLDA classifier.

    Additional arguments will be forwarded to the underlying classifier
    instantiation, which is
    ``sklearn.discriminant_analysis.LinearDiscriminantAnalysis`` here.

    Keyword Arguments
    -----------------
    solver: string, default = lsqr
        Solver used in LDA
    shrinkage: string, default = 'auto'
    """
    super(ShrinkingLDA, self).__init__()
    # Consume our own keywords first; whatever remains in kwargs is passed
    # straight through to the sklearn estimator.
    self.solver = kwargs.pop('solver', 'lsqr')
    self.shrinkage = kwargs.pop('shrinkage', 'auto')
    self.clf = _LinearDiscriminantAnalysis(solver=self.solver,
                                           shrinkage=self.shrinkage,
                                           **kwargs)
示例10: rdm_lda_kfold
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def rdm_lda_kfold(x, labels):
    """Pairwise LDA decoding accuracies for every pair of label classes.

    For each unordered pair of unique labels, fits a shrinkage LDA with
    repeated stratified 3-fold cross-validation and records the mean
    accuracy. Returns a vector with one entry per pair (NaN until filled).
    """
    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
    from sklearn.model_selection import RepeatedStratifiedKFold
    from sklearn.model_selection import cross_val_score

    lda = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
    folding = RepeatedStratifiedKFold(n_splits=3, n_repeats=3)
    class_pairs = list(itertools.combinations(numpy.unique(labels), 2))
    utv = numpy.full([len(class_pairs), ], numpy.nan)
    for idx in trange(len(class_pairs), desc='pairs', leave=False, ascii=True):
        # Restrict the data to the two classes of this pair.
        keep = numpy.isin(labels, class_pairs[idx])
        scores = cross_val_score(lda, x[keep, :], labels[keep], cv=folding)
        utv[idx] = scores.mean()
    return utv
示例11: test_model_logistic_linear_discriminant_analysis
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_model_logistic_linear_discriminant_analysis(self):
    """Binary-class LDA converts to ONNX and scores like sklearn."""
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    X_test = np.array([[-0.8, -1], [-2, -1]], dtype=np.float32)

    model = LinearDiscriminantAnalysis().fit(X, y)
    initial_types = [("input", FloatTensorType([None, X_test.shape[1]]))]
    model_onnx = convert_sklearn(model, "linear model", initial_types)
    self.assertIsNotNone(model_onnx)

    dump_data_and_model(
        X_test,
        model,
        model_onnx,
        basename="SklearnLinearDiscriminantAnalysisBin-Dec3",
        # Operator cast-1 is not implemented in onnxruntime
        allow_failure="StrictVersion(onnx.__version__)"
        " < StrictVersion('1.3') or "
        "StrictVersion(onnxruntime.__version__)"
        " <= StrictVersion('0.2.1')",
    )
示例12: test_model_logistic_linear_discriminant_analysis_decfunc
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_model_logistic_linear_discriminant_analysis_decfunc(self):
    """Binary-class LDA in ONNX with raw decision-function scores."""
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    X_test = np.array([[-0.8, -1], [0, 1]], dtype=np.float32)

    model = LinearDiscriminantAnalysis().fit(X, y)
    initial_types = [("input", FloatTensorType([None, X_test.shape[1]]))]
    # raw_scores=True exports decision-function values instead of probabilities.
    model_onnx = convert_sklearn(model, "linear model", initial_types,
                                 options={id(model): {'raw_scores': True}})
    self.assertIsNotNone(model_onnx)

    dump_data_and_model(
        X_test, model, model_onnx,
        basename="SklearnLinearDiscriminantAnalysisBinRawScore-Out0",
        # Operator cast-1 is not implemented in onnxruntime
        allow_failure="StrictVersion(onnx.__version__)"
        " < StrictVersion('1.3') or "
        "StrictVersion(onnxruntime.__version__)"
        " <= StrictVersion('0.2.1')",
        methods=['predict', 'decision_function']
    )
示例13: test_model_logistic_linear_discriminant_analysis_decfunc3
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_model_logistic_linear_discriminant_analysis_decfunc3(self):
    """Three-class LDA in ONNX with raw decision-function scores."""
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 3])
    X_test = np.array([[-0.8, -1], [0, 1]], dtype=np.float32)

    model = LinearDiscriminantAnalysis().fit(X, y)
    initial_types = [("input", FloatTensorType([None, X_test.shape[1]]))]
    # raw_scores=True exports decision-function values instead of probabilities.
    model_onnx = convert_sklearn(model, "linear model", initial_types,
                                 options={id(model): {'raw_scores': True}})
    self.assertIsNotNone(model_onnx)

    dump_data_and_model(
        X_test, model, model_onnx,
        basename="SklearnLinearDiscriminantAnalysisBinRawScore3-Out0",
        # Operator cast-1 is not implemented in onnxruntime
        allow_failure="StrictVersion(onnx.__version__)"
        " < StrictVersion('1.3') or "
        "StrictVersion(onnxruntime.__version__)"
        " <= StrictVersion('0.2.1')",
        methods=['predict', 'decision_function']
    )
示例14: test_lda_coefs
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def test_lda_coefs():
    """Coefficients of the three LDA solvers agree to ~1 decimal place."""
    X, y = make_blobs(n_samples=1000, n_features=2, centers=2,
                      random_state=11)
    # Fit one classifier per solver on identical data.
    fitted = {solver: LinearDiscriminantAnalysis(solver=solver).fit(X, y)
              for solver in ("svd", "lsqr", "eigen")}
    assert_array_almost_equal(fitted["svd"].coef_, fitted["lsqr"].coef_, 1)
    assert_array_almost_equal(fitted["svd"].coef_, fitted["eigen"].coef_, 1)
    assert_array_almost_equal(fitted["eigen"].coef_, fitted["lsqr"].coef_, 1)
示例15: plot_lda
# 需要導入模塊: from sklearn import discriminant_analysis [as 別名]
# 或者: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as 別名]
def plot_lda(self, group_ids, channels, sample=None, ax=None):
    """
    Reduce dimensionality using LDA and plot data

    Parameters
    ----------
    group_ids:
        Group identifiers whose scores are retrieved and plotted.
    channels:
        Channels forwarded to ``scores_for_groups``.
    sample: float, optional
        If given, fraction of each group's points to plot (subsampled
        without replacement).
    ax: optional
        Axis-like object to draw on; defaults to the module-level ``plt``.
    """
    ax = plt if ax is None else ax
    scores, labels = self.scores_for_groups(group_ids, channels)
    lda = LDA(n_components=2)
    reduced = lda.fit_transform(scores, labels)
    for color in np.unique(group_ids).astype('int'):
        x = reduced[labels == color, 0]
        y = reduced[labels == color, 1]
        if sample:
            # Bug fix: the original sampled y from ``x`` (not ``y``) and
            # drew x and y independently, which destroyed the (x, y)
            # pairing of the scatter points. Draw one shared index set
            # and apply it to both coordinates.
            idx = np.random.choice(len(x), size=int(sample * len(x)),
                                   replace=False)
            x = x[idx]
            y = y[idx]
        ax.scatter(x, y, label='Group {}'.format(color), alpha=0.7)
    ax.legend()