This page collects typical usage examples of sklearn.feature_selection in Python. If you are wondering what sklearn.feature_selection does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore other uses of the containing package, sklearn.
The following shows 5 code examples of sklearn.feature_selection, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
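Before the project examples, here is a minimal, self-contained sketch of typical sklearn.feature_selection usage (not taken from any of the projects below); the iris dataset and the k=2 setting are illustrative choices only.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2

# Keep the 2 features with the highest chi-square scores.
X, y = load_iris(return_X_y=True)
selector = SelectKBest(chi2, k=2)
X_new = selector.fit_transform(X, y)   # chi2 requires non-negative feature values
print(X.shape, "->", X_new.shape)      # (150, 4) -> (150, 2)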
Example 1: test_clone
# Required import: import sklearn [as alias]
# or: from sklearn import feature_selection [as alias]
import numpy as np
from sklearn.base import clone
from sklearn.utils.testing import assert_equal  # legacy sklearn test helper (removed in recent releases)

def test_clone():
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert selector is not new_selector
    assert_equal(selector.get_params(), new_selector.get_params())

    # clone also handles a non-scalar (array-valued) parameter.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert selector is not new_selector
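A short follow-up sketch (not from the original test suite): clone copies the constructor parameters but drops any fitted state, so the returned copy must be re-fitted before use. The random data here is illustrative only.

import numpy as np
from sklearn.base import clone
from sklearn.feature_selection import SelectFpr, f_classif

rng = np.random.RandomState(0)
X, y = rng.rand(20, 5), rng.randint(0, 2, 20)

selector = SelectFpr(f_classif, alpha=0.1).fit(X, y)
copy = clone(selector)
print(hasattr(selector, "scores_"))  # True  - the original is fitted
print(hasattr(copy, "scores_"))      # False - the clone starts unfitted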
Example 2: featuresFromFeatureSelection
# Required import: import sklearn [as alias]
# or: from sklearn import feature_selection [as alias]
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import SelectKBest, chi2

def featuresFromFeatureSelection(X, Y, columnNames):
    for f in columnNames:
        print(f)
    # Score all features with the chi-square test (k=34 keeps the 34 best).
    X_new_withfitTransform = SelectKBest(chi2, k=34).fit(X, Y)
    colors = getColorNames()  # project-specific helper returning one color per feature
    counter = 0
    scores = X_new_withfitTransform.scores_
    scores_scaled = np.divide(scores, 1000)
    for score in scores_scaled:
        # if score > 10:
        #     print('Feature {:>34}'.format(columnNames[counter]))
        print('{:>34}'.format(score))
        # Plot one bar per feature
        plt.bar(counter, score, color=colors[counter])
        counter += 1
    plt.ylabel('Scores (1k)')
    plt.title('Scores calculated by Chi-Square Test')
    plt.legend(columnNames, bbox_to_anchor=(0., 0.8, 1., .102), loc=3,
               ncol=5, mode="expand", borderaxespad=0.)
    plt.show()
    # print(feature_selection.chi2(X, Y))
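The function above depends on project-specific pieces (the getColorNames() helper and a dataset with 34 columns). The following simplified sketch shows the same idea on the iris dataset, calling chi2 directly and using matplotlib's default colors; the dataset and names here are illustrative, not part of the original project.

import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.feature_selection import chi2

X, y = load_iris(return_X_y=True)
scores, p_values = chi2(X, y)            # chi2 expects non-negative features

plt.bar(range(len(scores)), scores)
plt.xticks(range(len(scores)), load_iris().feature_names, rotation=45)
plt.ylabel('Chi-square score')
plt.title('Scores calculated by Chi-Square Test')
plt.tight_layout()
plt.show()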
Example 3: test_clone
# Required import: import sklearn [as alias]
# or: from sklearn import feature_selection [as alias]
import numpy as np
from sklearn.base import clone
# Legacy sklearn test helpers (removed in recent releases):
from sklearn.utils.testing import assert_equal, assert_true

def test_clone():
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())

    # clone also handles a non-scalar (array-valued) parameter.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
Example 4: test_clone_2
# Required import: import sklearn [as alias]
# or: from sklearn import feature_selection [as alias]
from sklearn.base import clone

def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert not hasattr(new_selector, "own_attribute")
Example 5: test_clone_2
# Required import: import sklearn [as alias]
# or: from sklearn import feature_selection [as alias]
from sklearn.base import clone
from sklearn.utils.testing import assert_false  # legacy test helper (removed in recent sklearn)

def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert_false(hasattr(new_selector, "own_attribute"))