

Python LinearDiscriminantAnalysis.predict Method Code Examples

This article collects typical usage examples of the Python method sklearn.discriminant_analysis.LinearDiscriminantAnalysis.predict. If you are wondering what exactly LinearDiscriminantAnalysis.predict does, how to use it, or what it looks like in practice, the hand-picked code examples below should help. You can also explore further usage examples of the class it belongs to, sklearn.discriminant_analysis.LinearDiscriminantAnalysis.


A total of 15 code examples of LinearDiscriminantAnalysis.predict are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
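Before diving in, the following minimal sketch shows the typical fit/predict workflow that all of the examples below build on. It uses scikit-learn's bundled iris data; the split ratio and solver choice are illustrative assumptions rather than settings taken from any example on this page.

from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

# Load a small labelled dataset and hold out a test split.
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

lda = LinearDiscriminantAnalysis(solver="svd")  # "svd" is the default solver
lda.fit(X_train, y_train)                       # estimate class means and the shared covariance
y_pred = lda.predict(X_test)                    # hard class labels, one per test row
print(y_pred[:10])
print("accuracy:", lda.score(X_test, y_test))

predict returns the most probable class label for each row; call predict_proba instead when posterior probabilities are needed.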

Example 1: computing_performance_LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def computing_performance_LDA(in_path=None, seeds=list([0])):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65, mean_u80 = 0, 0
    n_times = len(seeds)
    for k in range(0, n_times):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=seeds[k])
        sum_u65, sum_u80 = 0, 0
        lda.fit(X_train, y_train)
        n, _ = X_test.shape
        for i, test in enumerate(X_test):
            evaluate = lda.predict([test])
            print("-----TESTING-----", i)
            if y_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        print("--k-->", k, sum_u65 / n, sum_u80 / n)
        mean_u65 += sum_u65 / n
        mean_u80 += sum_u80 / n
    print("--->", mean_u65 / n_times, mean_u80 / n_times)
Author: sdestercke, Project: classifip, Lines of code: 31, Source file: qdatest.py

Example 2: main

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def main():
    """Read Train/test log."""
    df = pd.read_csv("train.csv")

    # train/test split using stratified sampling
    labels = df['label']
    df = df.drop(['label'], axis=1)
    sss = StratifiedShuffleSplit(labels, 10, test_size=0.2, random_state=23)
    for train_index, test_index in sss:
        x_train, x_test = df.values[train_index], df.values[test_index]
        y_train, y_test = labels[train_index], labels[test_index]

    # classification algorithm
    classification(x_train, y_train, x_test, y_test)

    # Predict Test Set
    favorite_clf = LinearDiscriminantAnalysis()
    favorite_clf.fit(x_train, y_train)
    test = pd.read_csv('test.csv')
    test_predictions = favorite_clf.predict(test)
    print(test_predictions)

    # Format DataFrame
    submission = pd.DataFrame(test_predictions, columns=['Label'])
    submission.tail()
    submission.insert(0, 'ImageId', np.arange(len(test_predictions)) + 1)
    submission.reset_index()
    submission.tail()

    # Export Submission
    submission.to_csv('submission.csv', index=False)
    submission.tail()
Author: ishmnnit, Project: Kaggle, Lines of code: 34, Source file: digit.py

Example 3: computing_cv_accuracy_LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def computing_cv_accuracy_LDA(in_path=None, cv_n_fold=10):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = np.array(data.iloc[:, -1].tolist())
    kf = KFold(n_splits=cv_n_fold, random_state=None, shuffle=True)
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65, mean_u80 = 0, 0
    for idx_train, idx_test in kf.split(y):
        print("---k-FOLD-new-executing--")
        X_cv_train, y_cv_train = X[idx_train], y[idx_train]
        X_cv_test, y_cv_test = X[idx_test], y[idx_test]
        lda.fit(X_cv_train, y_cv_train)
        n_test = len(idx_test)
        sum_u65, sum_u80 = 0, 0
        for i, test in enumerate(X_cv_test):
            evaluate = lda.predict([test])
            print("-----TESTING-----", i)
            if y_cv_test[i] in evaluate:
                sum_u65 += u65(len(evaluate))
                sum_u80 += u80(len(evaluate))
        mean_u65 += sum_u65 / n_test
        mean_u80 += sum_u80 / n_test
    print("--->", mean_u65 / cv_n_fold, mean_u80 / cv_n_fold)
Author: sdestercke, Project: classifip, Lines of code: 34, Source file: qdatest.py

Example 4: LinearDiscriminantAnalysiscls

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
class LinearDiscriminantAnalysiscls(object):
    """docstring for ClassName"""
    def __init__(self):
        self.lda_cls = LinearDiscriminantAnalysis()
        self.prediction = None
        self.train_x = None
        self.train_y = None

    def train_model(self, train_x, train_y):
        try:
            self.train_x = train_x
            self.train_y = train_y
            self.lda_cls.fit(train_x, train_y)
        except:
            print(traceback.format_exc())

    def predict(self, test_x):
        try:
            self.test_x = test_x
            self.prediction = self.lda_cls.predict(test_x)
            return self.prediction
        except:
            print(traceback.format_exc())

    def accuracy_score(self, test_y):
        try:
            # return r2_score(test_y, self.prediction)
            return self.lda_cls.score(self.test_x, test_y)
        except:
            print(traceback.format_exc())
Author: obaid22192, Project: machine-learning, Lines of code: 32, Source file: classifiers.py

Example 5: doLDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def doLDA(x,digits,s):
    myLDA = LDA()
    myLDA.fit(x.PCA[:,:s],digits.train_Labels)
    newtest = digits.test_Images -x.centers
    newtest = newtest @ x.V[:s, :].T  # project centered test images onto the first s principal directions (reconstructed; the original line was garbled)
    labels = myLDA.predict(newtest)
    errors = class_error_rate(labels.reshape(1,labels.shape[0]),digits.test_Labels)
    return errors
Author: AndrewZastovnik, Project: Math-285-Hw3, Lines of code: 10, Source file: Problem1.py

Example 6: train_model

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
    def train_model(self):
        ### Train spectrum data
        # form training data and labels
        X = np.empty((0, self.freq_cutoff), int)
        y = np.empty((0, 1), int)

        data_dir = 'clap_data/claps/spectrum/'
        for fname in os.listdir(data_dir):
            data = np.load("%s%s"% (data_dir, fname))
            X = np.append(X, data, axis=0)
            y = np.append(y, [1] * data.shape[0])

        data_dir = 'clap_data/noclaps/spectrum/'
        for fname in os.listdir(data_dir):
            data = np.load("%s%s"% (data_dir, fname))
            X = np.append(X, data, axis=0)
            y = np.append(y, [0] * data.shape[0])

        # pca = PCA(n_components=200)
        # X_pca = pca.fit_transform(X)

        # fit the model
        # clf = LogisticRegression(penalty='l1')
        clf = LinearDiscriminantAnalysis()
        clf.fit(X, y)
        preds = clf.predict(X)
        # X_new = clf.transform(X)

        # clf2 = LinearDiscriminantAnalysis()
        # clf2.fit(X_new, y)
        # preds2 = clf2.predict(X_new)

        # print X.shape, X_pca.shape
        print(preds)
        print(np.sum(preds), preds.size)
        # print(preds2, np.sum(preds2))

        # save model (binary mode is required for pickle)
        pickle.dump(clf, open(clap_model_dir + clap_classifier_fname, 'wb'))
        self.clap_clf = clf

        ### Train decay data
        X = np.empty((0, self.decay_samples // 10), int)  # integer division so the shape is an int

        data_dir = 'clap_data/claps/decay/'
        for fname in os.listdir(data_dir):
            if fname.endswith('npy'):
                data = np.load("%s%s"% (data_dir, fname))
                print(data.shape, X.shape)
                X = np.append(X, data, axis=0)

        print(X.shape)
        X_avg = np.mean(X, axis=0)
        plt.plot(X_avg)
        plt.show()

        # Average decay data
        np.save('%s%s' % (clap_model_dir, clap_decay_model_fname), X_avg)
Author: mzw4, Project: MorningAssistant, Lines of code: 60, Source file: clap.py

Example 7: testEvaluateLDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
 def testEvaluateLDA(self, trCList, teCList):
     # LDA object
     clf = LinearDiscriminantAnalysis()
     # fit lda model using training chromosomes
     clf.fit(numpy.asarray(trCList), numpy.asarray(trainGroupings))
     
     predicted = clf.predict(teCList)
         
     self.confusionMatrix(testGroupings, predicted, 'lda_test')
     
     # return precision ([0]), recall ([1]) or f1 score ([2]), replace with clf.score(numpy.asarray(teCList), testGroupings) for accuracy
     return precision_recall_fscore_support(testGroupings, predicted, average = 'weighted')[2] # fitness for test set
Author: MatthewCarse, Project: evolve, Lines of code: 14, Source file: machineLearning.py

Example 8: train_DA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
    def train_DA(self, X, y, lda_comp, qda_reg):
        '''
        Input: 
            qda_reg - reg_param
            lda_comp - n_components
            X - data matrix (train_num, feat_num)
            y - target labels matrix (train_num, label_num)

        Output: 
            best_clf - best classifier trained (QDA/LDA)
            best_score - CV score of best classifier

        Find best DA classifier.
        '''
        n_samples, n_feat = X.shape
        cv_folds = 10
        kf = KFold(n_samples, cv_folds, shuffle=False)

        
        
        lda = LinearDiscriminantAnalysis(n_components = lda_comp)
        qda = QuadraticDiscriminantAnalysis(reg_param = qda_reg)
        score_total_lda = 0 #running total of metric score over all cv runs
        score_total_qda = 0 #running total of metric score over all cv runs
        for train_index, test_index in kf:
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            
            lda.fit(X_train, y_train)
            cv_pred_lda = lda.predict(X_test)
            score_lda = eval(self.metric + '(y_test[:,None], cv_pred_lda[:,None], "' + self.task + '")')
            score_total_lda += score_lda
            
            qda.fit(X_train,y_train)
            cv_pred_qda = qda.predict(X_test)
            score_qda = eval(self.metric + '(y_test[:,None], cv_pred_qda[:,None], "' + self.task + '")')
            score_total_qda += score_qda

        score_lda = score_total_lda/cv_folds
        score_qda = score_total_qda/cv_folds
        
        # We keep the best one
        if(score_qda > score_lda):
            qda.fit(X,y)
            return qda, score_qda
        else:
            lda.fit(X,y)
            return lda, score_lda
Author: ludovicth, Project: chalearn, Lines of code: 50, Source file: myautoml.py

Example 9: computing_precise_vs_imprecise

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def computing_precise_vs_imprecise(in_path=None, ell_optimal=0.1, seeds=list([0])):
    def u65(mod_Y):
        return 1.6 / mod_Y - 0.6 / mod_Y ** 2

    def u80(mod_Y):
        return 2.2 / mod_Y - 1.2 / mod_Y ** 2

    data = export_data_set('iris.data') if in_path is None else pd.read_csv(in_path)
    print("-----DATA SET TRAINING---", in_path)
    X = data.iloc[:, :-1].values
    y = data.iloc[:, -1].tolist()
    n_time = len(seeds)
    lda_imp = LinearDiscriminant(init_matlab=True)
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    mean_u65_imp, mean_u80_imp, u_mean = 0, 0, 0
    for k in range(0, n_time):
        X_train, X_test, y_train, y_test = \
            train_test_split(X, y, test_size=0.4, random_state=seeds[k])
        lda_imp.learn(X_train, y_train, ell=ell_optimal)
        lda.fit(X_train, y_train)
        sum_u65, sum_u80 = 0, 0
        u_precise, n_real_test = 0, 0
        n_test, _ = X_test.shape
        for i, test in enumerate(X_test):
            print("--TESTING-----", i)
            evaluate_imp, _ = lda_imp.evaluate(test)
            if len(evaluate_imp) > 1:
                n_real_test += 1
                if y_test[i] in evaluate_imp:
                    sum_u65 += u65(len(evaluate_imp))
                    sum_u80 += u80(len(evaluate_imp))
                evaluate = lda.predict([test])
                if y_test[i] in evaluate:
                    u_precise += u80(len(evaluate))
        mean_u65_imp += sum_u65 / n_real_test
        mean_u80_imp += sum_u80 / n_real_test
        u_mean += u_precise / n_real_test
        print("--time_k--u65-->", k, sum_u65 / n_real_test)
        print("--time_k--u80-->", k, sum_u80 / n_real_test)
        print("--time_k--precise-->", k, u_precise / n_real_test)
    print("--global--u65-->", mean_u65_imp / n_time)
    print("--global--u80-->", mean_u80_imp / n_time)
    print("--global--precise-->", u_mean / n_time)
Author: sdestercke, Project: classifip, Lines of code: 45, Source file: qdatest.py

Example 10: lda_pred

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def lda_pred(Xtrain, Xtest, Ytrain, Ytest):
    """ Simple Naive Implementation of the the LDA
    """
    # empty list for the predictions
    Ypred = []
    
    # loop through and perform classification
    for xtrain, xtest, ytrain, ytest in zip(Xtrain,Xtest,
                                            Ytrain, Ytest):
        # initialize the model                
        lda_model = LDA()
        
        # fit the model to the training data
        lda_model.fit(xtrain, ytrain.ravel())
        
        # save the results of the model predicting the testing data
        Ypred.append(lda_model.predict(xtest))
    
    # return this list    
    return Ypred    
Author: jejjohnson, Project: manifold_learning, Lines of code: 22, Source file: classification_list.py

Example 11: classifyLDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
    def classifyLDA(self, tCList, vCList):
        if self.mode == "cv":
            # LDA object
            clf = make_pipeline(preprocessing.StandardScaler(), LinearDiscriminantAnalysis())
            predicted = cross_validation.cross_val_predict(clf, tCList, trainGroupings, cv=3)

            if self.cm:
                self.confusionMatrix(trainGroupings, predicted, 'lda_cv')
            
            return precision_recall_fscore_support(trainGroupings, predicted, average = 'weighted')[2]
            
        else:
            clf = LinearDiscriminantAnalysis()
            # fit lda model using training chromosomes
            clf.fit(numpy.asarray(tCList), numpy.asarray(trainGroupings))
            predicted = clf.predict(numpy.asarray(vCList))

            if self.cm:
                self.confusionMatrix(validGroupings, predicted, 'lda_valid')

            # return precision ([0]), recall ([1]) or f1 score ([2]), replace with clf.score(numpy.asarray(vCList), validGroupings) for accuracy
            return precision_recall_fscore_support(validGroupings, predicted, average = 'weighted')[2] # fitness for validation set
Author: MatthewCarse, Project: evolve, Lines of code: 23, Source file: machineLearning.py

Example 12: processTraining

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
def processTraining(cvtrainx,cvtrainy,cvevalx,prob=False):
    print(cvtrainx[0])
    #cvevalx=[' '.join(s) for s in cvevalx]
    print(cvevalx[0])
    tfv = TfidfVectorizer(min_df=10,  max_features=None,
        strip_accents='unicode', analyzer=mytokenlizer,
        ngram_range=(1, 5), use_idf=1,smooth_idf=1,sublinear_tf=1,
        stop_words = 'english')

    cvtrainx=tfv.fit_transform(cvtrainx)
    cvevalx=tfv.transform(cvevalx)
    tsvd=TruncatedSVD(n_components=600,random_state=2016)
    cvtrainx=tsvd.fit_transform(cvtrainx)
    cvevalx=tsvd.transform(cvevalx)
    print(len(tfv.get_feature_names()))
    print(tfv.get_feature_names()[0:10])
    clf=LinearDiscriminantAnalysis()
    clf.fit(cvtrainx,cvtrainy)
    if prob:
        predictValue=clf.predict_proba(cvevalx)
    else:
        predictValue=clf.predict(cvevalx)
    return predictValue
Author: StevenLOL, Project: aicyber_semeval_2016_ivector, Lines of code: 25, Source file: system_2_baseline.py

Example 13: RobustScaler

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
devtest='./exp/ivectors_semeval_devtest_NGMM_2048_W_2_DIM_200/feats.txt'
dev='./exp/ivectors_semeval_dev_NGMM_2048_W_2_DIM_200/feats.txt'
train='./exp/ivectors_semeval_train_NGMM_2048_W_2_DIM_200/feats.txt'



trainy,trainx=imdb_bag_of_word_libs.loadFeatsText(train)
trainy=imdb_bag_of_word_libs.kaldiID_2_LB(trainy)
evaly,evalx=imdb_bag_of_word_libs.loadFeatsText(dev)
evaly=imdb_bag_of_word_libs.kaldiID_2_LB(evaly)

evaly2,evalx2=imdb_bag_of_word_libs.loadFeatsText(devtest)
evaly2=imdb_bag_of_word_libs.kaldiID_2_LB(evaly2)


robust_scaler = RobustScaler()
trainx=robust_scaler.fit_transform(trainx)
evalx=robust_scaler.transform(evalx)

clf = LinearDiscriminantAnalysis()
clf.fit(trainx,trainy)
predictValue=clf.predict(evalx)

print(semeval2016_libs.scoreSameOrder(predictValue,configure.SCORE_REF_DEV))

evalx2=robust_scaler.transform(evalx2)
predictValue=clf.predict(evalx2)


print(semeval2016_libs.scoreSameOrder(predictValue,configure.SCORE_REF_DEVTEST))
Author: StevenLOL, Project: aicyber_semeval_2016_ivector, Lines of code: 32, Source file: 0042_test_ivector_SemEval2016.py

Example 14: print

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
label_e = ["ell"]*ell.shape[0]
label_v = ["vox"]*vox.shape[0]
label_w = ["wtr"]*wtr.shape[0]
label_r = ["rig"]*rig.shape[0]
label_c = ["con"]*(ell.shape[0]+vox.shape[0])


print()
print("CONTINUOUS VS. RIGID")
print("Training data: ellipse/voxel vs rigid...")
trainingSet = np.vstack((ell, vox, rig)).tolist()
labels = label_c + label_r
clf = LinearDiscriminantAnalysis()
clf.fit(trainingSet, labels)
print("Testing on wild type...")
predictions = clf.predict(wtr.tolist())
count = 0
for prediction in predictions:
    if (prediction=="con"):
        count+=1
print("Number of continuous predictions: "+str(count)+"/"+str(wtr.shape[0]))

print()
print("ELLIPSE VS. RIGID")
print("Training data: ellipse vs. rigid...")
trainingSet = np.vstack((ell, rig)).tolist()
labels = label_e + label_r
clf = LinearDiscriminantAnalysis()
clf.fit(trainingSet, labels)
print("Testing on voxels...")
predictions = clf.predict(vox.tolist())
Author: adityamukund, Project: proteinholes, Lines of code: 33, Source file: lda.py

Example 15: LDA

# Required import: from sklearn.discriminant_analysis import LinearDiscriminantAnalysis [as alias]
# Or: from sklearn.discriminant_analysis.LinearDiscriminantAnalysis import predict [as alias]
import seaborn as sns

# Dataset
n_samples, n_features = 100, 2
mean0, mean1 = np.array([0, 0]), np.array([0, 2])
Cov = np.array([[1, .8],[.8, 1]])
np.random.seed(42)
X0 = np.random.multivariate_normal(mean0, Cov, n_samples)
X1 = np.random.multivariate_normal(mean1, Cov, n_samples)
X = np.vstack([X0, X1])
y = np.array([0] * X0.shape[0] + [1] * X1.shape[0])

# LDA with scikit-learn
lda = LDA()
proj = lda.fit(X, y).transform(X)
y_pred = lda.predict(X)

errors =  y_pred != y
print("Nb errors=%i, error rate=%.2f" % (errors.sum(), errors.sum() / len(y_pred)))

# Use pandas & seaborn for convenience
data = pd.DataFrame(dict(x0=X[:, 0], x1=X[:, 1], y=["c"+str(v) for v in y]))
plt.figure()
g = sns.PairGrid(data, hue="y")
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
g.add_legend()



plt.figure()
Author: neurospin, Project: pystatsml, Lines of code: 33, Source file: ml_linear_classification.py


Note: The sklearn.discriminant_analysis.LinearDiscriminantAnalysis.predict examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.