

Python LinearDiscriminantAnalysis.fit_transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.discriminant_analysis.LinearDiscriminantAnalysis.fit_transform. If you are wondering what LinearDiscriminantAnalysis.fit_transform does, how to use it, or where to find examples, the curated code samples below should help. You can also explore further usage examples of the class it belongs to, sklearn.discriminant_analysis.LinearDiscriminantAnalysis.


Below are 15 code examples of the LinearDiscriminantAnalysis.fit_transform method, sorted by popularity by default.
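As a quick orientation before the project examples, here is a minimal, self-contained sketch of LinearDiscriminantAnalysis.fit_transform on synthetic data; the array shapes and the n_components value are illustrative assumptions, not taken from any of the projects below.

# Minimal sketch: fit LDA on labeled data and project it onto the discriminant axes
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

rng = np.random.RandomState(0)
X = rng.randn(150, 10)                   # 150 samples, 10 features (illustrative)
y = rng.randint(0, 3, size=150)          # 3 classes, so at most 2 discriminant axes

lda = LinearDiscriminantAnalysis(n_components=2)
X_lda = lda.fit_transform(X, y)          # fit on (X, y), then return the projected X
print(X_lda.shape)                       # (150, 2)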

Example 1: performLDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def performLDA(data_to_fit, y, numComponent=None):
    data_to_fit_np_t = np.array(data_to_fit).T
    if numComponent is None:
        numComponent = len(data_to_fit_np_t)
    lda_model = LinearDiscriminantAnalysis(n_components=numComponent)
    lda_results = lda_model.fit_transform(data_to_fit_np_t, y)
    return lda_model, lda_results
Author: anthonyho, Project: arrayAnalysisTools, Lines: 9, Source: CN_analysislib.py
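A hypothetical call to Example 1's performLDA might look like the sketch below; the data shapes are assumptions, and note that the function transposes its first argument, so data_to_fit is expected with one feature per row and one sample per column.

# Hypothetical usage of performLDA (assumed shapes; needs the import above plus numpy as np)
rng = np.random.RandomState(0)
data_to_fit = rng.randn(4, 100)          # 4 features (rows) x 100 samples (columns)
y = rng.randint(0, 3, size=100)          # one of 3 class labels per sample
lda_model, lda_results = performLDA(data_to_fit, y, numComponent=2)
print(lda_results.shape)                 # (100, 2): samples projected onto 2 discriminants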

Example 2: assess_embedding

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def assess_embedding(to_vec):
	"""
	Returns LDA classification score and projected data
	"""
	(x_data, y_data) = get_x_y_matrices(to_vec)

	lda = LDA(n_components=2)
	x_prime = lda.fit_transform(x_data, y_data)
	score = lda.score(x_data, y_data)

	return (x_prime.reshape(26, ), y_data, score)
Author: bhtucker, Project: perceptron_viz, Lines: 13, Source: separability.py

Example 3: transformLDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def transformLDA(X,y,xTest):
    
    originalSize = np.size(X,1)
    print("Learning LDA \nProjecting {} features to 1 component".format(originalSize))
    priors = [0.5,0.5]

    clf = LinearDiscriminantAnalysis(solver='svd', n_components=1, priors=priors)
    print(X.shape)
    X = clf.fit_transform(X,y)
    print("True size of X : ", X.shape)

    if xTest != []:
        xTest = clf.transform(xTest)
    return X,xTest
Author: Mathieu-Seurin, Project: dat-eeg, Lines: 16, Source: manipulateData.py

Example 4: run_LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def run_LDA(df):
    """
    Run LinearDiscriminantAnalysis on input dataframe (df) and return
    transformed data, scalings and explained variance by discriminants.
    """
    # Prep variables for sklearn LDA
    X = df[range(1, df.shape[1])].values     # input data matrix
    y = df["Condition"].values               # data categories list

    # Calculate LDA
    sklearn_lda = LDA()
    X_lda_sklearn = sklearn_lda.fit_transform(X, y)
    exp_var = sklearn_lda.explained_variance_ratio_

    return X_lda_sklearn, y, exp_var
Author: vvaraljay, Project: phylotoast, Lines: 17, Source: LDA.py

Example 5: run_LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def run_LDA(df):
    # Prep variables for sklearn LDA
    X = df[range(2, df.shape[1])].values     # input data matrix
    y = df['Condition'].values               # data categories list

    # Calculate LDA
    sklearn_lda = LDA(n_components=2)
    X_lda_sklearn = sklearn_lda.fit_transform(X, y)

    # Quality Test - can be ignored
#     print len(X_lda_sklearn)
#     print sklearn_lda.predict_proba(X)
#     print(sklearn_lda.score(X, y))

    return X_lda_sklearn, y
Author: zhenjiaofenjie, Project: phylotoast, Lines: 17, Source: LDA.py

Example 6: train_model

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def train_model(csv_path):
    '''
    INPUT: 
    audio features csv with 'class' labels included

    OUTPUT:
    three pickled models stored in the models dir
    - StandardScaler (sklearn)
    - LinearDiscriminantAnalysis (sklearn)
    - SVC (sklearn)

    Takes an audio feature csv (created from 'feature_extraction.py') and returns pickled models to use
    '''
    csv = LOCAL_REPO_DIR + csv_path
    df = pd.read_csv(csv)  # read the full path assembled above

    # extracts X, y for training model from dataframe
    X = df.drop(['class', 'fold', 'Unnamed: 0'], axis=1).values
    y = df['class'].values

    # feature matrix has many different scales, need to standardize
    ss = StandardScaler()
    X = ss.fit_transform(X)

    lda = LinearDiscriminantAnalysis()
    X_lda = lda.fit_transform(X, y)

    # trains model using best performing model/hyperparameters using kfold grid search
    svm = SVC(C=1, gamma=0.04)
    svm.fit(X_lda, y)
    
    # accuracy check to make sure the model is performing
    y_pred_svm = svm.predict(X_lda)
    print 'model accuracy: ', accuracy_score(y, y_pred_svm)

    # cPickles models for later use
    with open(LOCAL_REPO_DIR + 'model/svm.pkl', 'wb') as f:
        cPickle.dump(svm, f)

    with open(LOCAL_REPO_DIR + 'model/lda.pkl', 'wb') as f:
        cPickle.dump(lda, f)

    with open(LOCAL_REPO_DIR + 'model/ss.pkl', 'wb') as f:
        cPickle.dump(ss, f)
Author: kmlee17, Project: citysounds, Lines: 46, Source: train_model.py

Example 7: run_LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def run_LDA(df):
    """
    Run LinearDiscriminantAnalysis on input dataframe (df) and return
    transformed data, scalings and explained variance by discriminants.
    """
    # Prep variables for sklearn LDA
    X = df.iloc[:, 1:df.shape[1]].values     # input data matrix
    y = df["Condition"].values               # data categories list

    # Calculate LDA
    sklearn_lda = LDA()
    X_lda_sklearn = sklearn_lda.fit_transform(X, y)
    try:
        exp_var = sklearn_lda.explained_variance_ratio_
    except AttributeError as ae:
        print("\n{}: explained variance cannot be computed.\nPlease check this GitHub PR:"
              " https://github.com/scikit-learn/scikit-learn/pull/6027".format(ae))
        return X_lda_sklearn, y, "NA"
    return X_lda_sklearn, y, exp_var
Author: akshayparopkari, Project: phylotoast, Lines: 21, Source: LDA.py

Example 8: fit_svm

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def fit_svm(prints):
    print "Fitting to SVM...."
    dataframe = pd.DataFrame(prints)
    y = dataframe[2]
    X = dataframe[0]

    # in case feature matrix has many different scales, need to standardize
    ss = StandardScaler()
    X = ss.fit_transform(X)


    lda = LinearDiscriminantAnalysis()
    X_lda = lda.fit_transform(X, y)

    # trains model using best performing model/hyperparameters using kfold grid search
    svm = SVC(C=1, gamma=0.04)
    svm.fit(X_lda, y)

    pickle_model(svm, 'svm')
Author: digideskio, Project: A_Cappella, Lines: 21, Source: train.py

Example 9: plot_sklearn_lda_with_lr

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def plot_sklearn_lda_with_lr(X_train, X_test, y_train, y_test):
    lda = LDA(n_components=2)
    X_train_lda = lda.fit_transform(X_train, y_train)

    lr = LogisticRegression()
    lr = lr.fit(X_train_lda, y_train)

    plot_decision_regions(X_train_lda, y_train, classifier=lr)
    plt.xlabel('LD 1')
    plt.ylabel('LD 2')
    plt.legend(loc='lower left')
    plt.show()

    X_test_lda = lda.transform(X_test)

    plot_decision_regions(X_test_lda, y_test, classifier=lr)
    plt.xlabel('LD 1')
    plt.ylabel('LD 2')
    plt.legend(loc='lower left')
    plt.show()
Author: jeremyn, Project: python-machine-learning-book, Lines: 22, Source: chapter_5.py

Example 10: project_back

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def project_back(x,digits):
    myLDA = LDA()
    new_train = myLDA.fit_transform(x.PCA[:,:154],digits.train_Labels)
    print(new_train.shape)
    m = 0
    n = 1
    plt.figure()
    plt.scatter(new_train[digits.train_Labels == 0,m],new_train[digits.train_Labels == 0,n], color='Green', s= 1)
    plt.scatter(new_train[digits.train_Labels == 1,m],new_train[digits.train_Labels == 1,n], color='Blue', s= 1)
    plt.scatter(new_train[digits.train_Labels == 2,m],new_train[digits.train_Labels == 2,n], color='Red', s= 1)
    plt.scatter(new_train[digits.train_Labels == 3,m],new_train[digits.train_Labels == 3,n], color='Purple', s= 1)
    plt.scatter(new_train[digits.train_Labels == 4,m],new_train[digits.train_Labels == 4,n], color='Black', s= 1)
    plt.scatter(new_train[digits.train_Labels == 5,m],new_train[digits.train_Labels == 5,n], color='Brown', s= 1)
    plt.scatter(new_train[digits.train_Labels == 6,m],new_train[digits.train_Labels == 6,n], color='Silver', s= 1)
    plt.scatter(new_train[digits.train_Labels == 7,m],new_train[digits.train_Labels == 7,n], color='Cyan', s= 1)
    plt.show()
    y = new_train@myLDA.coef_[:9,:] # I really don't know if this will work since there are 10 coef things
    weighted_y2 = y[:,:154]@x.V[:154,:] + x.centers
    plt.imshow(weighted_y2[0,:].reshape(28,28))
    plt.show()
Author: AndrewZastovnik, Project: Math-285-hw2, Lines: 22, Source: HW2PB5.py

Example 11: do_LDA2D_KNN

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def do_LDA2D_KNN(digits,p,q):
    l,r = LDA2D.iterative2DLDA(digits.train_Images, digits.train_Labels, p, q, 28, 28)

    new_train = np.zeros((digits.train_Images.shape[0],p*q))
    for i in range(digits.train_Images.shape[0]):
        new_train[i] = (np.transpose(l)@digits.train_Images[i].reshape(28,28)@r).reshape(p*q)
    new_test = np.zeros((digits.test_Images.shape[0],p*q))
    for i in range(digits.test_Images.shape[0]):
        new_test[i] = (np.transpose(l)@digits.test_Images[i].reshape(28,28)@r).reshape(p*q)
    myLDA = LDA()
    x = center_matrix_SVD(new_train)
    new_new_train = myLDA.fit_transform(new_train-x.centers,digits.train_Labels)
    new_new_test = myLDA.transform(new_test-x.centers)
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'euclidean')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_EU.p','wb'))
    #pickle.dump(nearest, open('NLDA2DFDA'+ str(p) + 'x' + str(q) + '_EU.p','wb'))
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'cityblock')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_CB.p','wb'))
    #pickle.dump(nearest, open('NLDA2DFDA'+ str(p) + 'x' + str(q) + '_CB.p','wb'))
    labels, nearest = KNN(new_new_train,digits.train_Labels,new_new_test,10,'cosine')
    pickle.dump(labels, open('LDA2DFDA'+ str(p) + 'x' + str(q) + '_CO.p','wb'))
Author: AndrewZastovnik, Project: Math-285-Hw3, Lines: 23, Source: Problem5.py

Example 12: apply

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
    def apply(self):
        transformed = components = None
        if self.data is not None:
            self.data = Continuize(Impute(self.data))
            lda = LinearDiscriminantAnalysis(solver='eigen', n_components=2)
            X = lda.fit_transform(self.data.X, self.data.Y)
            dom = Domain([ContinuousVariable('Component_1'),
                          ContinuousVariable('Component_2')],
                         self.data.domain.class_vars, self.data.domain.metas)
            transformed = Table(dom, X, self.data.Y, self.data.metas)
            transformed.name = self.data.name + ' (LDA)'
            dom = Domain(self.data.domain.attributes,
                         metas=[StringVariable(name='component')])
            metas = np.array([['Component_{}'.format(i + 1)
                                  for i in range(lda.scalings_.shape[1])]],
                                dtype=object).T
            components = Table(dom, lda.scalings_.T, metas=metas)
            components.name = 'components'

        self.send("Transformed data", transformed)
        self.send("Components", components)
Author: pavlin-policar, Project: orange3-prototypes, Lines: 23, Source: owlda.py

Example 13: leave_one_out

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def leave_one_out(feature_dict, glob, classifier, title):
    # feature_dict is a dictionary of feature names and a triple of booleans defining
    # which summary metrics to include respectively: (mean, std, measurewise)
    all_features = glob.get_features(feature_dict)
    all_classes = glob.get_feature('class', (True, True, True))
    
    class_pred, class_real = [], []
    
    vis.print_stars(newline=True)
    print("Testing " + title + " classification with features:")
    print(list(feature_dict.keys()))
    vis.print_dashes()
    sys.stdout.write("\r0 / %d samples processed (...)" % len(all_features))
    
    pca = LinearDiscriminantAnalysis()
    all_features = pca.fit_transform(all_features, all_classes.ravel())
    start = time.clock()
    
    for idx in range(len(all_features)):
        train_features = np.delete(all_features, idx, 0)
        train_classes = np.delete(all_classes, idx, 0)
        
        test_feature = np.transpose(all_features[idx,:]).reshape((1, train_features.shape[1]))
        test_class = np.transpose(all_classes[idx,:])
        
        predicted_class = classify(train_features, train_classes, test_feature, classifier)
        
        class_pred.append(predicted_class)
        class_real.append(genre_from_int(test_class))
    
        t = time.clock() - start
        time_per_iteration = t / (idx + 1)
        remaining = time_per_iteration * (len(all_features) - (idx + 1))
        
        sys.stdout.write("\r%d / %d samples processed (%02d:%02d:%02d left)" % 
            ((idx + 1), len(all_features), remaining / 3600, (remaining / 60) % 60, remaining % 60))
    
    return [class_pred, class_real]
Author: emitch, Project: music-classifier, Lines: 40, Source: classify.py

Example 14: main

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
def main():
    digits = mnist() # Creates a class with our mnist images and labels
    if open('Training SVD Data','rb')._checkReadable() == 0: # Check if file exist create it if it doesn't
        x = center_matrix_SVD(digits.train_Images) # Creates a class with our svd and associated info
        pickle.dump(x,open('Training SVD Data','wb'))
    else:
        x = pickle.load(open('Training SVD Data','rb'))  # If we already have the file just load it
    if 1: # if this is zero skip
        test_Images_Center = np.subtract(digits.test_Images,np.repeat(x.centers,digits.test_Images.shape[0],0))
        tic()
        myLDA = LDA()  # Create a new instance of the LDA class
        new_train = myLDA.fit_transform(x.PCA[:,:154],digits.train_Labels)  # It will fit based on x.PCA
        new_test = myLDA.transform(test_Images_Center@np.transpose(x.V[:154,:])) # get my transformed test dataset
        Knn_labels = local_kmeans_class(new_train,digits.train_Labels,new_test,10) # Run kNN on the new data
        toc()
        pickle.dump(Knn_labels,open('Loc_kmeans_fda_lab','wb'))

    fda = pickle.load(open('Loc_kmeans_fda_lab','rb'))
    labels_Full = pickle.load(open('KNN_Full','rb'))
    loc_full = pickle.load(open('Loc_kmeans_Full_lab','rb'))
    errors_fda,ind_fda = class_error_rate(np.transpose(fda),digits.test_labels)
    errors_near,ind_near = class_error_rate(labels_Full,digits.test_labels)
    errors_full,ind_full = class_error_rate(np.transpose(loc_full),digits.test_labels)
    labels_50 = pickle.load(open('KNN_50','rb'))
    errors_50,ind_50 = class_error_rate(labels_50,digits.test_labels)
    print(errors_full)
    plt.figure()
    plt.plot(np.arange(10)+1, errors_fda, color='Green', marker='o', markersize=10, label='fda Kmeans')  #plots the 82.5%
    plt.plot(np.arange(10)+1, errors_near, color='Blue', marker='o', markersize=10, label='kNN')
    plt.plot(np.arange(10)+1, errors_full, color='Yellow', marker='o', markersize=10, label='Full Kmeans')
    plt.plot(np.arange(10)+1, errors_50, color='Red', marker='o', markersize=10, label='kNN 50')
    axes = plt.gca()
    axes.set_ylim([0.015,0.12])
    plt.grid(1) # Turns the grid on
    plt.title('Plot of Local Kmeans with FDA Error rates')
    plt.legend(loc='upper right')  # Puts a legend on the plot
    plt.show()
    project_back(x,digits)
Author: AndrewZastovnik, Project: Math-285-hw2, Lines: 40, Source: HW2PB5.py

Example 15: dimension_reduce

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import fit_transform [as alias]
 def dimension_reduce(self,mode='L'):
     
     print 'Reduce Dimensions...'
     print 'Start:' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
     
     raw_train=self.train.copy()
     train=self.train.copy()
     train_label=self.train_label['label'].values.copy()
     train_label=train_label.reshape((train_label.shape[0]))
         
     test=self.test.copy()
     test_label=self.test_label['label'].values.copy()
     test_label=test_label.reshape((test_label.shape[0]))
     
     flist=train.columns
     
     if mode.upper()=='L':
         lda=LinearDiscriminantAnalysis()
         X_new=lda.fit_transform(train.values,train_label)
         self.train=pd.DataFrame(X_new,columns=['DR'])
         self.test=pd.DataFrame(lda.transform(test[flist].values),columns=['DR'])
         
         tt=lda.coef_[0]
         ind=np.argsort(tt)
         features=raw_train.columns[ind[-100:]]
         feas=pd.DataFrame()
         feas['feature']=features
         feas['values']=tt[ind[-100:]]
         return feas
         
     elif mode.upper()=='P':
         pca = PCA(n_components=100)
         X_new=pca.fit_transform(train.values,train_label)
         self.train=pd.DataFrame(X_new)
         self.test=pd.DataFrame(pca.transform(test[flist].values))
         
     print 'End:' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
Author: JackMeiLong, Project: ML.Practise, Lines: 39, Source: framework_v1.py


Note: The sklearn.discriminant_analysis.LinearDiscriminantAnalysis.fit_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their original authors; copyright remains with those authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.