This article collects typical usage examples of the Python method sklearn.feature_selection.SelectFwe.transform. If you have been wondering what SelectFwe.transform does and how to use it, the curated code examples below may help. For more context, see the documentation of the containing class, sklearn.feature_selection.SelectFwe.
Four code examples of SelectFwe.transform are shown below, sorted by popularity.
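Before the real-world examples, here is a minimal, self-contained sketch of the fit/transform pattern that SelectFwe follows (the synthetic data and the alpha value are illustrative, not taken from the examples below):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFwe, f_classif

# Synthetic data: 200 samples, 50 features, only 5 of them informative.
X, y = make_classification(n_samples=200, n_features=50, n_informative=5, random_state=0)

# Keep the features whose ANOVA F-test p-values pass a family-wise
# error rate (FWE) threshold of alpha.
fwe = SelectFwe(score_func=f_classif, alpha=0.01).fit(X, y)
X_reduced = fwe.transform(X)

print(X.shape, "->", X_reduced.shape)
print("Selected feature indices:", np.where(fwe.get_support())[0])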
Example 1: main
# Required import: from sklearn.feature_selection import SelectFwe [as alias]
# Or: from sklearn.feature_selection.SelectFwe import transform [as alias]
def main(args):
    if args.train_dir is None:
        # args.train_dir = '/a/fr-05/vol/protein/danofer/ProtFeat/feat_extract/chap/train/'
        # args.train_dir = '/cs/prt3/danofer/ProtFeat/feat_extract/test_seq/NP/SPCleaved_NP-70+NEG-30_Big-V3/'
        # args.train_dir = r'D:\SkyDrive\Dropbox\bioInf_lab\AA_info\CODE\feat_extract\test_seq\NP\SPCleaved_NP-70+NEG-30_Big-V3'
        # args.train_dir = r'E:\Dropbox\Dropbox\bioInf_lab\AA_info\fastas\NP\SP_Cleaved+NP+Neg_Big'
        args.train_dir = r'E:\Dropbox\Dropbox\bioInf_lab\AA_info\fastas\Benchmarks\Thermophiles'
        print("Using default train_dir: %s" % args.train_dir)

    pandas.set_option('display.max_columns', 10)
    pandas.set_option('display.max_rows', 4)
    # mpl.rc('title', labelsize=6)
    mpl.rc('ytick', labelsize=7)
    mpl.rc('xtick', labelsize=4)

    os.chdir(args.train_dir)
    dataName = 'Neuropeptides'
    df = pandas.read_csv('trainingSetFeatures.csv')
    feature_cols = [col for col in df.columns if col not in ['classname', 'Id', 'proteinname']]
    feature_cols = numpy.array(feature_cols)
    X = df[feature_cols].values
    y = df.classname.values
    le = LabelEncoder()
    y = le.fit_transform(y)

    # Initial feature selection trimming.
    print(X.shape)
    Fwe = SelectFwe(alpha=0.01).fit(X, y)
    X = Fwe.transform(X)
    print("F-test -> ", X.shape)
    feature_cols = feature_cols[Fwe.get_support()]
    '''
    FeatSelection_SVM = True
    if FeatSelection_SVM == True:
        svc_L1 = LinearSVC(C=50, penalty="l1", dual=False, class_weight='auto').fit(X, y)
        X = svc_L1.transform(X)
        print("L1 SVM Transformed X:", X.shape)
        feature_cols = feature_cols[list(set(np.where(svc_L1.coef_ != 0)[-1]))]
    '''

    k = SelectKBest(k=255).fit(X, y)
    X = k.transform(X)
    feature_cols = feature_cols[k.get_support()]

    # Hyperparameter grid (e.g. for a randomized search; not used in the code shown here).
    param_dist = {"max_depth": [6, 9, None],
                  "max_features": ['auto', 0.4],
                  "min_samples_leaf": [1, 2, 3],
                  "bootstrap": [True, False],
                  'min_samples_split': [2, 3],
                  "criterion": ["gini"],
                  "n_estimators": [100],
                  "n_jobs": [-1]}

    rf = RandomForestClassifierWithCoef(max_depth=7, min_samples_split=1, min_samples_leaf=2,
                                        n_estimators=50, n_jobs=2, max_features="auto")

    # WARNING: as implemented by default, the F1 score in binary classification
    # (two classes) reports the score for one class only.
    scores = cross_validation.cross_val_score(rf, X, y, n_jobs=-1,
                                              cv=cross_validation.StratifiedShuffleSplit(y, n_iter=8, test_size=0.2))
    print("X RF Accuracy: %0.3f (+- %0.2f)" % (scores.mean(), scores.std() * 2))
    # Instead of f1 we could also use precision, sensitivity, MCC (if binary), etc.
    scores_f1 = cross_validation.cross_val_score(rf, X, y, n_jobs=-1,
                                                 cv=cross_validation.StratifiedShuffleSplit(y, n_iter=8, test_size=0.2),
                                                 scoring='f1')
    print("X RF f1: %0.3f (+- %0.2f)" % (scores_f1.mean(), scores_f1.std() * 2))

    # rfeSelect = RFE(estimator=rf, n_features_to_select=16, step=0.04)
    rfeSelect = RFECV(estimator=rf, step=20, cv=2, scoring='f1')  # average_precision, recall
    X_RFE = rfeSelect.fit_transform(X, y)
    print(X_RFE.shape)

    RFE_FeatureNames = feature_cols[rfeSelect.get_support()]
    print(RFE_FeatureNames)

    RFE_ScoreRatio = 100 * (cross_validation.cross_val_score(rf, X_RFE, y, n_jobs=-1,
                            cv=cross_validation.StratifiedShuffleSplit(y, n_iter=8, test_size=0.2),
                            scoring='f1').mean()) / scores_f1.mean()
    print("Even with just", X_RFE.shape[1], "features, we have %f performance! (f1 score ratio)" % RFE_ScoreRatio)

    # PlotFeaturesImportance(X_RFE, y, RFE_FeatureNames, dataName)
    print("Alt plot:")
    altPlotFeaturesImportance(X_RFE, y, RFE_FeatureNames, dataName)
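Example 1 chains SelectFwe, SelectKBest, and RFECV by overwriting X in place. In current scikit-learn this kind of chain is usually wrapped in a Pipeline, which re-fits each selector inside every cross-validation fold so the selection never sees held-out data. A minimal sketch under that assumption (step names, the synthetic data, and all parameter values are illustrative, not taken from the example above):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFwe
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline

X, y = make_classification(n_samples=300, n_features=60, n_informative=10, random_state=0)

# The SelectFwe step is re-fitted on the training split of every fold,
# so feature selection does not leak information from the test split.
pipe = Pipeline([
    ("fwe", SelectFwe(alpha=0.05)),
    ("rf", RandomForestClassifier(n_estimators=100, random_state=0)),
])
scores = cross_val_score(pipe, X, y, cv=5)
print("Pipeline accuracy: %0.3f (+- %0.2f)" % (scores.mean(), scores.std() * 2))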
Example 2: LabelEncoder
# Required import: from sklearn.feature_selection import SelectFwe [as alias]
# Or: from sklearn.feature_selection.SelectFwe import transform [as alias]
# In[ ]:
X = df[feature_cols].values
y = df.classname.values

# In[ ]:
le = LabelEncoder()
y = le.fit_transform(y)

# In[ ]:
print("Orig X -> ", X.shape)
Fwe = SelectFwe(alpha=0.001).fit(X, y)
X = Fwe.transform(X)
print("F-test -> ", X.shape)
feature_cols = feature_cols[Fwe.get_support()]

# In[ ]:
rf = RandomForestClassifierWithCoef(max_depth=9, min_samples_split=3, min_samples_leaf=3,
                                    n_estimators=650, n_jobs=-1, max_features="auto")

# In[ ]:
scores = cross_val_score(rf, X, y, n_jobs=-1, cv=StratifiedShuffleSplit(y, n_iter=7, test_size=0.3))
print("X RF Accuracy: %0.3f (+- %0.2f)" % (scores.mean(), scores.std() * 2))
# scores_f1 = cross_val_score(rf, X, y, n_jobs=-1, cv=StratifiedShuffleSplit(y, n_iter=10, test_size=0.22), scoring='f1')
# print("X RF f1: %0.3f (+- %0.2f)" % (scores_f1.mean(), scores_f1.std() * 2))
Example 3: GetAllPerf
# Required import: from sklearn.feature_selection import SelectFwe [as alias]
# Or: from sklearn.feature_selection.SelectFwe import transform [as alias]
def GetAllPerf(filePaths=None):
    if filePaths is None:
        filePaths = list(find_files(directory='./test_seq', pattern='trainingSetFeatures.csv'))

    # Sanity check:
    # filePaths = ['/a/fr-05/vol/protein/danofer/ProtFeat/feat_extract/test_seq/Thermophile']
    # filePaths = ['./test_seq/NP/NP2/Train/trainingSetFeatures.csv']

    print("FilePaths: \n", filePaths)
    fileNames = fileNameFromPaths(filePaths)
    print("FileNames:", fileNames)

    # resDict holds the results for each file/class, for saving to the output file.
    resDict = pd.DataFrame(index=fileNames,
                           columns=['Accuracy', 'Accuracy_SD',
                                    'f1', 'f1_SD', 'dummy_freq:Accuracy', 'dummy_freq:f1',
                                    'LargestClassPercent', 'Classes',
                                    # 'TopRFE-Features', 'Best (f1) Model parameters',
                                    '# Classes',
                                    'Array-Acc-Scores', 'Array-f1-Scores',
                                    'bestML-Acc', 'bestML-f1', 'dummy_freq_f1_weighted'])

    i = -1
    for filePath in filePaths:
        i += 1
        # http://pythonconquerstheuniverse.wordpress.com/2008/06/04/gotcha-%E2%80%94-backslashes-in-windows-filenames/
        filePath = os.path.normpath(filePath)
        print(filePath)
        fileName = str(fileNames[i])  # str() added 14.1
        print("fileName: %s" % (fileName))
        # resDict['Name'] = fileName
        # filePath = str(argv[1])
        # X, y, lb_encoder, featureNames = load_data(filePath + fileName, 'file')
        X, y, lb_encoder, featureNames = load_data(filePath, 'file')  # X, y = features, labels
        print(X.shape, "= (samples, features)")

        y_inv = Counter(lb_encoder.inverse_transform(y))
        MajorityPercent = round(100 * y_inv.most_common()[0][1] / sum(y_inv.values()), 1)
        print("Classes:", lb_encoder.classes_)
        print("MajorityClassPercent:", MajorityPercent)

        resDict.LargestClassPercent[fileName] = MajorityPercent
        resDict.Classes[fileName] = str(lb_encoder.classes_)
        resDict["# Classes"][fileName] = len(lb_encoder.classes_)

        KFilt = None
        KFilt = 350  # Temporary, for the outputs - saves computation time. Barely filters compared to the model itself.
        if KFilt is not None:
            k = SelectKBest(k=KFilt).fit(X, y)
            X = k.transform(X)
            featureNames = featureNames[k.get_support()]

        Fwe = SelectFwe(alpha=0.01).fit(X, y)
        X = Fwe.transform(X)
        featureNames = featureNames[Fwe.get_support()]
        print("X reduced to K best features: ", X.shape)

        FeatSelection_SVM = False  # Feature names need updating!
        FeatSelection_RandLogReg = False

        if FeatSelection_RandLogReg:
            LogRegFeats = RandomizedLogisticRegression(C=10, scaling=0.5,
                                                       sample_fraction=0.95, n_resampling=40,
                                                       selection_threshold=0.2, n_jobs=-1).fit(X, y)
            X_L1 = LogRegFeats.transform(X)
            featureNames = featureNames[LogRegFeats.get_support()]
            print("RandomizedLogisticRegression Feature Selection ->:", X_L1.shape)
        elif FeatSelection_SVM:
            svc_L1 = LinearSVC(C=30, penalty="l2", dual=False, class_weight='auto').fit(X, y)
            X_L1 = svc_L1.transform(X)
            featureNames = featureNames[list(set(np.where(svc_L1.coef_ != 0)[-1]))]
            print("L1 SVM Transformed X:", X_L1.shape)
        # X = X_L1

        '''
        print("Performance as a function of percent of features used:")
        PlotPerfPercentFeatures(X, y, est=LinearSVC())
        '''

        # E.g. graph best features; feature selection using RF, ensemble classifiers...
        # http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/assignment2/samsung_data_prediction_submitted.ipynb

        RFE_FeatsToKeep = 16
        FeatSelection_RFE = False
        FeatSelection_RFECV = False

        if FeatSelection_RFE or FeatSelection_RFECV:
            # RFE: keep the best features.
            # http://scikit-learn.org/stable/auto_examples/plot_rfe_with_cross_validation.html
            svc = LinearSVC(class_weight='auto')  # , penalty='l1', dual=False
            # svc = LogisticRegression(class_weight='auto')  # , C=1
# ... the rest of this example is omitted ...
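The omitted tail of this example presumably exercises the RFE/RFECV branch configured above. As a rough, self-contained sketch of what an RFECV pass over a LinearSVC looks like (synthetic data; the step, cv, and scoring values are illustrative, not recovered from the omitted code; class_weight='auto' from the snippet is spelled 'balanced' in current scikit-learn):

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFECV
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=300, n_features=40, n_informative=8, random_state=0)

# Recursively drop features, keeping the count that maximizes the CV score.
svc = LinearSVC(class_weight='balanced')
selector = RFECV(estimator=svc, step=1, cv=3, scoring='f1')
X_rfe = selector.fit_transform(X, y)

print("Optimal number of features:", selector.n_features_)
print(X.shape, "->", X_rfe.shape)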
Example 4: print
# Required import: from sklearn.feature_selection import SelectFwe [as alias]
# Or: from sklearn.feature_selection.SelectFwe import transform [as alias]
# Plot the line, the points, and the nearest vectors to the plane.
pl.plot(xx, yy, "k-")
pl.plot(xx, yy_down, "k--")
pl.plot(xx, yy_up, "k--")
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors="none")
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis("tight")
pl.show()

print("Training Accuracy")
# print(clf.decision_function(x_train))
print(classification_report(y_train, clf.predict(x_train), target_names=target_names))

# Apply the already-fitted feature selector (fs, the SelectFwe instance) to the test set.
x_test = fs.transform(x_test)
print("Testing Accuracy")
print(classification_report(y_test, clf.predict(x_test), target_names=target_names))

decisions = clf.decision_function(x_test)
print("DECISION", decisions.shape[1])
# print(y_test)

X = np.array(decisions[:, 0])
# print(X)
Y = np.array(decisions[:, 2])
Z = np.array(decisions[:, 1])

points = []
for i, val in enumerate(X):
    # print(X[i], Y[i], Z[i])
    points.append((X[i], Y[i], Z[i]))
points = list(set(points))