This page collects typical usage examples of the Python module sklearn.feature_selection. If you have been wondering what sklearn.feature_selection does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the parent package, sklearn.
The following shows 5 code examples of sklearn.feature_selection, sorted by popularity by default.
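Before diving into the collected examples, here is a minimal, self-contained sketch (written for this page, not taken from any of the examples below) of the typical sklearn.feature_selection workflow; the iris dataset and k=2 are arbitrary illustrative choices.

from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2

# Score each feature against the class label and keep the two best ones.
X, y = load_iris(return_X_y=True)
selector = SelectKBest(chi2, k=2)
X_reduced = selector.fit_transform(X, y)
print(X.shape, "->", X_reduced.shape)  # (150, 4) -> (150, 2)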
Example 1: test_clone
# Required module: import sklearn [as alias]
# Or: from sklearn import feature_selection [as alias]
def test_clone():
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    import numpy as np
    from sklearn.base import clone
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert selector is not new_selector
    assert selector.get_params() == new_selector.get_params()

    # clone must also succeed when a parameter holds a non-trivial
    # object such as a NumPy array.
    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert selector is not new_selector
Example 2: featuresFromFeatureSelection
# Required module: import sklearn [as alias]
# Or: from sklearn import feature_selection [as alias]
def featuresFromFeatureSelection(X, Y, columnNames):
    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.feature_selection import SelectKBest, chi2

    for f in columnNames:
        print(f)

    # Score every feature against the target with the chi-square test.
    X_new_withfitTransform = SelectKBest(chi2, k=34).fit(X, Y)
    colors = getColorNames()  # project-local helper: one plot color per feature
    counter = 0
    scores = X_new_withfitTransform.scores_
    scores_scaled = np.divide(scores, 1000)
    for score in scores_scaled:
        #if(score > 10):
        #print('Feature {:>34}'.format(columnNames[counter]))
        print('{:>34} '.format(score))
        # Plot a bar for this feature's score.
        plt.bar(counter, score, color=colors[counter])
        counter += 1
    plt.ylabel('Scores (1k)')
    plt.title('Scores calculated by Chi-Square Test')
    plt.legend(columnNames, bbox_to_anchor=(0., 0.8, 1., .102), loc=3,
               ncol=5, mode="expand", borderaxespad=0.)
    plt.show()
    #print(feature_selection.chi2(X, Y))
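As a hedged follow-up to Example 2 (not part of the original snippet), a fitted selector's scores_ and get_support() can be mapped back to column names directly, without any plotting helpers; the iris DataFrame below is only an illustrative stand-in for X, Y and columnNames.

import pandas as pd
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest, chi2

X, y = load_iris(return_X_y=True, as_frame=True)
selector = SelectKBest(chi2, k=2).fit(X, y)

# Chi-square score per feature, highest first.
print(pd.Series(selector.scores_, index=X.columns).sort_values(ascending=False))

# Names of the k columns the selector keeps.
print(list(X.columns[selector.get_support()]))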
Example 3: test_clone
# Required module: import sklearn [as alias]
# Or: from sklearn import feature_selection [as alias]
def test_clone():
    # Tests that clone creates a correct deep copy.
    # We create an estimator, make a copy of its original state
    # (which, in this case, is the current state of the estimator),
    # and check that the obtained copy is a correct deep copy.
    import numpy as np
    from sklearn.base import clone
    from sklearn.feature_selection import SelectFpr, f_classif
    # assert_true/assert_equal are testing helpers shipped with older
    # scikit-learn releases (removed in recent versions).
    from sklearn.utils.testing import assert_equal, assert_true

    selector = SelectFpr(f_classif, alpha=0.1)
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
    assert_equal(selector.get_params(), new_selector.get_params())

    selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
    new_selector = clone(selector)
    assert_true(selector is not new_selector)
Example 4: test_clone_2
# Required module: import sklearn [as alias]
# Or: from sklearn import feature_selection [as alias]
def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.base import clone
    from sklearn.feature_selection import SelectFpr, f_classif

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert not hasattr(new_selector, "own_attribute")
Example 5: test_clone_2
# Required module: import sklearn [as alias]
# Or: from sklearn import feature_selection [as alias]
def test_clone_2():
    # Tests that clone doesn't copy everything.
    # We first create an estimator, give it an own attribute, and
    # make a copy of its original state. Then we check that the copy doesn't
    # have the specific attribute we manually added to the initial estimator.
    from sklearn.base import clone
    from sklearn.feature_selection import SelectFpr, f_classif
    # assert_false is a testing helper shipped with older scikit-learn
    # releases (removed in recent versions).
    from sklearn.utils.testing import assert_false

    selector = SelectFpr(f_classif, alpha=0.1)
    selector.own_attribute = "test"
    new_selector = clone(selector)
    assert_false(hasattr(new_selector, "own_attribute"))
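As a hedged supplement to Examples 4 and 5 (not from the original snippets), the same "clone doesn't copy everything" rule applies to fitted state: clone rebuilds the estimator from get_params() alone, so attributes learned during fit, such as scores_, are absent from the copy.

from sklearn.base import clone
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectFpr, f_classif

X, y = load_iris(return_X_y=True)
selector = SelectFpr(f_classif, alpha=0.1).fit(X, y)
assert hasattr(selector, "scores_")             # fitted attribute is present
assert not hasattr(clone(selector), "scores_")  # but not on the fresh clone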