

Python QDA.score Method Code Examples

This article collects and summarizes typical usage examples of the sklearn.qda.QDA.score method in Python. If you have been struggling with questions such as: what exactly does Python's QDA.score do? How is QDA.score used? What do real examples of QDA.score look like? Then the curated method examples below should help. You can also explore further usage examples of sklearn.qda.QDA, the class this method belongs to.


The following presents 12 code examples of the QDA.score method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
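
Before diving into the examples, here is a minimal, hedged sketch of what the method does: score(X, y) returns the mean accuracy of the classifier's predictions on X against the true labels y. The two-blob data below is synthetic and purely illustrative; note also that sklearn.qda.QDA was deprecated in later scikit-learn releases and eventually removed in favor of sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.

# Minimal QDA.score sketch on synthetic data (assumes an older
# scikit-learn release in which sklearn.qda still exists)
import numpy as np
from sklearn.qda import QDA

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) - 2, rng.randn(50, 2) + 2])  # two Gaussian blobs
y = np.array([0] * 50 + [1] * 50)

clf = QDA()
clf.fit(X, y)
print(clf.score(X, y))  # mean accuracy; close to 1.0 for well-separated blobs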

Example 1: get_QDA

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def get_QDA(Xtrain, Xtest, Ytrain, Ytest):
    qda = QDA()
    qda.fit(Xtrain,Ytrain)
#    predLabels = qda.predict(Xtest)
#    print("Classification Rate Test QDA: " + str(np.mean(Ytest==predLabels)*100) + " %")
    scores = np.empty((4))  # only the first two entries (train/test accuracy) are filled
    scores[0] = qda.score(Xtrain,Ytrain)
    scores[1] = qda.score(Xtest,Ytest)
    print('QDA, train: {0:.02f}% '.format(scores[0]*100))
    print('QDA, test: {0:.02f}% '.format(scores[1]*100))
    return qda
Author: manuwhs, Project: Trapyng, Lines: 13, Source: system_modules.py

Example 2: performSVMClass

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def performSVMClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel):
	"""
	SVM binary classification
	"""
	clf = QDA()
	clf.fit(X_train, y_train)

	accuracy = clf.score(X_test, y_test)
	return accuracy
Author: jko0531, Project: Machine-Learning, Lines: 11, Source: prediction.py

Example 3: performQDAClass

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def performQDAClass(X_train, y_train, X_test, y_test):
    """
    Gradient Tree Boosting binary Classification
    """
    clf = QDA()
    clf.fit(X_train, y_train)
    accuracy = clf.score(X_test, y_test)
    #auc = roc_auc_score(y_test, clf.predict(X_test))
    return accuracy
Author: FraPochetti, Project: StocksProject, Lines: 11, Source: functions.py

Example 4: table_4_1

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def table_4_1():
    """Reproduces table 4.1 in ESLii showing the training and test error rates
    for classifying vowels using different classification techniques. The
    sklearn implementation of logistic regression uses OvA instead of a true
    multinomial which likely accounts for the worse results
    """
    vowels_train = eslii.read_vowel_data()
    train_X = vowels_train[vowels_train.columns[1:]]
    train_y = vowels_train['y']
    vowels_test = eslii.read_vowel_data(train=False)
    test_X = vowels_test[vowels_test.columns[1:]]
    test_y = vowels_test['y']

    lda = LDA().fit(train_X, train_y)
    print "Linear discriminant analysis:  {:.2f} {:.2f}".format(
        1 - lda.score(train_X, train_y), 1 - lda.score(test_X, test_y))
    qda = QDA().fit(train_X, train_y)
    print "Quadratic discriminant analysis:  {:.2f} {:.2f}".format(
        1 - qda.score(train_X, train_y), 1 - qda.score(test_X, test_y))
    lr = LogisticRegression(C=1e30).fit(train_X, train_y)
    print "Logistic regression:  {:.2f} {:.2f}".format(
        1 - lr.score(train_X, train_y), 1 - lr.score(test_X, test_y))
Author: syting, Project: esl, Lines: 24, Source: ch4.py
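
A side note on the OvA remark in the docstring above: on scikit-learn versions that support it, a true multinomial fit can be requested explicitly. The hedged variant below reuses train_X, train_y, test_X, and test_y from the example, and assumes a release where LogisticRegression accepts multi_class='multinomial' together with the lbfgs solver.

# Hedged variant: a true multinomial logistic regression instead of OvA
from sklearn.linear_model import LogisticRegression

mlr = LogisticRegression(C=1e30, multi_class='multinomial',
                         solver='lbfgs').fit(train_X, train_y)
print "Multinomial logistic regression:  {:.2f} {:.2f}".format(
    1 - mlr.score(train_X, train_y), 1 - mlr.score(test_X, test_y))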

Example 5: performQDAClass

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def performQDAClass(X_train, y_train, X_test, y_test, parameters, fout, savemodel):
    """
    Quadratic Discriminant Analysis binary Classification
    """
    def replaceTiny(x):
        # clamp near-zero entries so QDA's covariance estimates stay stable
        if abs(x) < 0.0001:
            x = 0.0001
        return x  # the original omitted the return, which would blank out the data

    # applymap applies element-wise; DataFrame.apply would pass whole columns in
    X_train = X_train.applymap(replaceTiny)
    X_test = X_test.applymap(replaceTiny)
    
    clf = QDA()
    clf.fit(X_train, y_train)

    if savemodel == True:
        fname_out = '{}-{}.pickle'.format(fout, datetime.now())
        with open(fname_out, 'wb') as f:
            cPickle.dump(clf, f, -1)    
    
    accuracy = clf.score(X_test, y_test)
    
    return accuracy
Author: shivamkejriwal, Project: stockMarketAnalysis, Lines: 24, Source: test.py
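
A hedged companion to the example above: once the classifier has been pickled, it can be loaded back and its score method reused. fname_out, X_test, and y_test are reused from performQDAClass for illustration; cPickle is Python 2 (on Python 3 the pickle module plays the same role).

# Load a previously pickled QDA classifier and evaluate it again
import cPickle

with open(fname_out, 'rb') as f:
    clf_restored = cPickle.load(f)
print clf_restored.score(X_test, y_test)  # same accuracy as before pickling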

Example 6: print

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
import numpy as np
import pickle
from sklearn.qda import QDA

data = np.load("sd.npy")
truth = np.load("truth.npy")

testdata = np.load("sd_test.npy")
testtruth = np.load("truth_test.npy")

print(len(data))

clf = QDA()
clf.fit(data,truth)

output=open("qda.pkl",'wb')

pickle.dump(clf,output)

output.close()

print(clf.score(data,truth))
print(clf.score(testdata,testtruth))

s = np.where(truth == 2)[0]
st = np.where(testtruth == 2)[0]
g = np.where(truth == 1)[0]
gt = np.where(testtruth == 1)[0]
print("Stars")
print(clf.score(data[s],truth[s]))
print(clf.score(testdata[st],testtruth[st]))
print("Galaxies")
print(clf.score(data[g],truth[g]))
print(clf.score(testdata[gt],testtruth[gt]))
Author: nkurinsky, Project: CS229-Project, Lines: 32, Source: qda.py

Example 7: trainQDA

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def trainQDA(XTrain, YTrain, XValid, YValid):
    qda = QDA()
    qda.fit(XTrain, YTrain)    
    print('QDA score : %f' % (qda.score(XValid, YValid)))
Author: LemonATsu, Project: Instrument-Classifier, Lines: 6, Source: train.py

Example 8: LDA

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
############ LDA #####################
# Build and fit the LDA model
lda_model = LDA()
lda_model.fit(X_std,y)
# Score on the training set and on the test set.
print lda_model.score(X_std,y)
print lda_model.score(X_std_test,ytest)


############ QDA #####################
# Build and fit the QDA model
qda_model = QDA()
qda_model.fit(X_std,y)
# Score on the training set and on the test set.
print qda_model.score(X_std,y)
print qda_model.score(X_std_test,ytest)

# ############ KNN #####################
# # Build and fit the KNN model
# knn_model = KNeighborsClassifier(n_neighbors=10)
# knn_model.fit(X_std,y)
# # Score on the training set and on the test set.
# print knn_model.score(X_std,y)
# print knn_model.score(X_std_test,ytest)
#
#
# score_training=[]
# score_test=[]
# Lclasses=range(1,len_training_set+1)
# # KNN behavior
Author: Seba93, Project: INF390-2016-T3, Lines: 32, Source: Parte+a.py

Example 9: main

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def main():
    #Define our connection string
    conn_string = "host='localhost' dbname='CRAWL4J' user='postgres' password='mogette'"
    # print the connection string we will use to connect
    print "Connecting to database\n    ->%s" % (conn_string)
 
    # get a connection, if a connect cannot be made an exception will be raised here
    conn = psycopg2.connect(conn_string)
    
    # fetching training data from Cdiscount-maison
    cdiscount_maison_request = "select url, whole_text, title, h1, short_description, status_code, depth, outlinks_size, inlinks_size, nb_breadcrumbs, nb_aggregated_ratings, nb_ratings_values, nb_prices, nb_availabilities, nb_reviews, nb_reviews_count, nb_images, nb_search_in_url, nb_add_in_text, nb_filter_in_text, nb_search_in_text, nb_guide_achat_in_text, nb_product_info_in_text, nb_livraison_in_text, nb_garanties_in_text, nb_produits_similaires_in_text, nb_images_text, width_average, height_average, page_rank, page_type, concurrent_name, last_update, semantic_hits, semantic_title, inlinks_semantic, inlinks_semantic_count  from arbocrawl_results  where page_type !='Unknown' and concurrent_name = 'Cdiscount-maison' "; 
    catPred=["PAGE DEPTH AT SITE LEVEL","NUMBER OF OUTGOING LINKS","NUMBER OF INCOMING LINKS","NUMBER OF ITEMTYPE http://data-vocabulary.org/Breadcrumb","NUMBER OF ITEMPROP aggregateRating","NUMBER OF ITEMPROP ratingValue","NUMBER OF ITEMPROP price","NUMBER OF ITEMPROP availability","NUMBER OF ITEMPROP review","NUMBER OF ITEMPROP reviewCount","NUMBER OF ITEMPROP image","NUMBER OF OCCURENCES FOUND IN URL of search + recherche + Recherche + Search","NUMBER OF OCCURENCES FOUND IN PAGE TEXT ajout + ajouter + Ajout + Ajouter","NUMBER OF OCCURENCES FOUND IN PAGE TEXT filtre + facette + Filtre + Facette + filtré + filtrés","NUMBER OF OCCURENCES FOUND IN PAGE TEXT Ma recherche + Votre recherche + résultats pour + résultats associés","NUMBER OF OCCURENCES FOUND IN PAGE TEXT guide d""achat + Guide d""achat","NUMBER OF OCCURENCES FOUND IN PAGE TEXT caractéristique + Caractéristique + descriptif + Descriptif +information + Information","NUMBER OF OCCURENCES FOUND IN PAGE TEXT livraison + Livraison + frais de port + Frais de port","NUMBER OF OCCURENCES FOUND IN PAGE TEXT garantie + Garantie +assurance + Assurance","NUMBER OF OCCURENCES FOUND IN PAGE TEXT Produits Similaires + produits similaires + Meilleures Ventes + meilleures ventes +Meilleures ventes + Nouveautés + nouveautés + Nouveauté + nouveauté","NUMBER OF HTML TAG img IN THE PAGE","AVERAGE WIDTH OF HTML TAG img IN THE PAGE","AVERAGE HEIGHT OF HTML TAG img IN THE PAGE"];
    semPred =["PAGE TEXT", "PAGE TITLE", "PAGE H1", "PAGE SHORT DESCRIPTION","TEN BEST TF/IDF HITS FOR THE PAGE","TITLE TF/IDF","PAGE INCOMING LINKS ANCHOR SEMANTIC"];

    print "Executing the following request to fetch data for Cdiscount-maison from the ARBOCRAWL_RESULTS table : " + cdiscount_maison_request
    print"Page-type predictors : "+ ', '.join(catPred)
    print"Semantic predictors : " + ', '.join(semPred)

    df = pd.read_sql(cdiscount_maison_request, conn)
    
  
    url_list = df.url.values
    semantic_columns = ["url","title","h1","short_description","semantic_hits", "semantic_title", "inlinks_semantic"];
    semantic_predictors = df[list(semantic_columns)].values;
    
    classifying_columns = ["depth", "outlinks_size", "inlinks_size", "nb_breadcrumbs", "nb_aggregated_ratings", "nb_ratings_values", "nb_prices", "nb_availabilities", "nb_reviews", "nb_reviews_count", "nb_images", "nb_search_in_url", "nb_add_in_text", "nb_filter_in_text", "nb_search_in_text", "nb_guide_achat_in_text", "nb_product_info_in_text", "nb_livraison_in_text", "nb_garanties_in_text", "nb_produits_similaires_in_text", "nb_images_text", "width_average","height_average"]
    classifying_predictors = df[list(classifying_columns)].values;
    X= np.asanyarray(classifying_predictors);
    y = df.page_type.values;

    print type(X)
    print X.shape
    print type(y)
    print y.shape
    
    # fetching the data to predict
    to_predict_request = "select url, whole_text, title, h1, short_description, status_code, depth, outlinks_size, inlinks_size, nb_breadcrumbs, nb_aggregated_ratings, nb_ratings_values, nb_prices, nb_availabilities, nb_reviews, nb_reviews_count, nb_images, nb_search_in_url, nb_add_in_text, nb_filter_in_text, nb_search_in_text, nb_guide_achat_in_text, nb_product_info_in_text, nb_livraison_in_text, nb_garanties_in_text, nb_produits_similaires_in_text, nb_images_text, width_average, height_average, page_rank, page_type, concurrent_name, last_update, semantic_hits, semantic_title, inlinks_semantic, inlinks_semantic_count  from arbocrawl_results  where concurrent_name != 'Cdiscount-maison' "; 
    df_to_predict = pd.read_sql(to_predict_request, conn)
    # df_to_predict.dropna()
    # df_to_predict.replace([np.inf, -np.inf], np.nan).dropna(subset=list(classifying_columns), how="all")
    # df_to_predict.dropna(subset=list(classifying_columns), how="all", with_inf=True)
    # indexnan = sum(np.isnan(Xval))
    # indexinfinite = np.isfinite(Xval)
    classifying_predictors_to_predict = df_to_predict[list(classifying_columns)].values;
    Xval= np.asanyarray(classifying_predictors_to_predict);
    print type(Xval)
    print Xval.shape
    
    url_val_list = df_to_predict.url.values
    print type(url_val_list)
    print url_val_list.shape
    
    # we must here filter the NaN / Infinity in Xval values
    #print np.isnan(Xval)
    #Xval = Xval[~np.isnan(Xval)]
    #print Xval.shape
 
    # transforming the predictors / rescaling the predictors
    # we don't need to do that
    #X = StandardScaler().fit_transform(X)
    #Xval = StandardScaler().fit_transform(Xval)
    
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    single_tree = DecisionTreeClassifier(max_depth=5)
    single_tree.fit(X_train, y_train)
    single_tree_score = single_tree.score(X_test, y_test)
    print "Single tree score " + str(single_tree_score)
    
    random_forest = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
    random_forest.fit(X_train, y_train)
    random_forest_score = random_forest.score(X_test, y_test)
    print "Random forest score " + str(random_forest_score)
    
    kneighbors =  KNeighborsClassifier(3)
    kneighbors.fit(X_train, y_train)
    kneighbors_score = kneighbors.score(X_test, y_test)
    print "K-Neighbors score " + str(kneighbors_score)
    
    adaboost =  AdaBoostClassifier()
    adaboost.fit(X_train, y_train)
    adaboost_score = adaboost.score(X_test, y_test)
    print "Ada boost score " + str(adaboost_score)

    gaussian_nb =  GaussianNB()
    gaussian_nb.fit(X_train, y_train)
    gaussian_nb_score = gaussian_nb.score(X_test, y_test)
    print "gaussian mixtures score " + str(gaussian_nb_score)
    
    lda =  LDA()
    lda.fit(X_train, y_train)
    lda_nb_score = lda.score(X_test, y_test)
    print "linear discriminant score " + str(lda_nb_score)
    
    qda =  QDA()
    qda.fit(X_train, y_train)
    qda_nb_score = qda.score(X_test, y_test)
    print "quadratic discriminant score " + str(qda_nb_score)
    
    #SVC(kernel="linear", C=0.025),
    #SVC(gamma=2, C=1),
#......... (part of the code omitted here) .........
Author: sduprey, Project: PYTHON_WEB, Lines: 103, Source: panda_data_fetching.py

Example 10: range

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
for i in range(0,9): 
	probas[i]=probas[i]/528

yhat_apriori = np.argmax(probas) + 1

print "Clase: %d"%yhat_apriori

######## Question (g) ############################################################

lda_model = LDA()
lda_model.fit(X_std,y)
print "Score LDA train: %f"%lda_model.score(X_std,y)
print "Score LDA test: %f"%lda_model.score(X_std_test,ytest)
qda_model = QDA()
qda_model.fit(X_std,y)
print "Score QDA train: %f"%qda_model.score(X_std,y)
print "Score QDA test: %f"%qda_model.score(X_std_test,ytest)
knn_model = KNeighborsClassifier(n_neighbors=10)
knn_model.fit(X_std,y)
print "Score KNN train: %f"%knn_model.score(X_std,y)
print "Score KNN test: %f"%knn_model.score(X_std_test,ytest)

values_train = []
values_test = []
for i in range(1, 12):
	knn_model = KNeighborsClassifier(n_neighbors=i)
	knn_model.fit(X_std,y)
	values_train.append(knn_model.score(X_std,y))

for i in range(1, 12):
	knn_model = KNeighborsClassifier(n_neighbors=i)
Author: Paulinyta, Project: Tarea3_AID, Lines: 33, Source: pregunta1.py

Example 11: main

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
def main():
    #Define our connection string
    conn_string = "host='localhost' dbname='CRAWL4J' user='postgres' password='mogette'"
    # print the connection string we will use to connect
    print "Connecting to database\n    ->%s" % (conn_string)
 
    # get a connection, if a connect cannot be made an exception will be raised here
    conn = psycopg2.connect(conn_string)
 
    # conn.cursor will return a cursor object, you can use this cursor to perform queries
    cursor = conn.cursor()
 
    # execute our Query
    # X = np.asarray(predictors_list);
    
    my_request = "select url, whole_text, title, h1, short_description, status_code, depth, outlinks_size, inlinks_size, nb_breadcrumbs, nb_aggregated_ratings, nb_ratings_values, nb_prices, nb_availabilities, nb_reviews, nb_reviews_count, nb_images, nb_search_in_url, nb_add_in_text, nb_filter_in_text, nb_search_in_text, nb_guide_achat_in_text, nb_product_info_in_text, nb_livraison_in_text, nb_garanties_in_text, nb_produits_similaires_in_text, nb_images_text, width_average, height_average, page_rank, page_type, concurrent_name, last_update, semantic_hits, semantic_title, inlinks_semantic, inlinks_semantic_count  from arbocrawl_results  where concurrent_name = (%s) "; 
    #url 0, whole_text 1, title 2, h1 3, short_description 4, status_code 5, depth 6, outlinks_size 7, inlinks_size 8, nb_breadcrumbs 9, nb_aggregated_ratings 10, nb_ratings_values 11, nb_prices 12, nb_availabilities 13, nb_reviews 14, nb_reviews_count 15, nb_images 16, nb_search_in_url 17, nb_add_in_text 18, nb_filter_in_text 19, nb_search_in_text 20, nb_guide_achat_in_text 21, nb_product_info_in_text 22, nb_livraison_in_text 23, nb_garanties_in_text 24, nb_produits_similaires_in_text 25, nb_images_text 26, width_average 27, height_average 28, page_rank 29, page_type 30, concurrent_name 31, last_update 32, semantic_hits 33, semantic_title 34, inlinks_semantic 35, inlinks_semantic_count 36  from arbocrawl_results 
    catPred=["PAGE DEPTH AT SITE LEVEL","NUMBER OF OUTGOING LINKS","NUMBER OF INCOMING LINKS","NUMBER OF ITEMTYPE http://data-vocabulary.org/Breadcrumb","NUMBER OF ITEMPROP aggregateRating","NUMBER OF ITEMPROP ratingValue","NUMBER OF ITEMPROP price","NUMBER OF ITEMPROP availability","NUMBER OF ITEMPROP review","NUMBER OF ITEMPROP reviewCount","NUMBER OF ITEMPROP image","NUMBER OF OCCURENCES FOUND IN URL of search + recherche + Recherche + Search","NUMBER OF OCCURENCES FOUND IN PAGE TEXT ajout + ajouter + Ajout + Ajouter","NUMBER OF OCCURENCES FOUND IN PAGE TEXT filtre + facette + Filtre + Facette + filtré + filtrés","NUMBER OF OCCURENCES FOUND IN PAGE TEXT Ma recherche + Votre recherche + résultats pour + résultats associés","NUMBER OF OCCURENCES FOUND IN PAGE TEXT guide d""achat + Guide d""achat","NUMBER OF OCCURENCES FOUND IN PAGE TEXT caractéristique + Caractéristique + descriptif + Descriptif +information + Information","NUMBER OF OCCURENCES FOUND IN PAGE TEXT livraison + Livraison + frais de port + Frais de port","NUMBER OF OCCURENCES FOUND IN PAGE TEXT garantie + Garantie +assurance + Assurance","NUMBER OF OCCURENCES FOUND IN PAGE TEXT Produits Similaires + produits similaires + Meilleures Ventes + meilleures ventes +Meilleures ventes + Nouveautés + nouveautés + Nouveauté + nouveauté","NUMBER OF HTML TAG img IN THE PAGE","AVERAGE WIDTH OF HTML TAG img IN THE PAGE","AVERAGE HEIGHT OF HTML TAG img IN THE PAGE"];
    semPred =["PAGE TEXT", "PAGE TITLE", "PAGE H1", "PAGE SHORT DESCRIPTION","TEN BEST TF/IDF HITS FOR THE PAGE","TITLE TF/IDF","PAGE INCOMING LINKS ANCHOR SEMANTIC"];

    print "Executing the following request to fetch data for Cdiscount-maison from the ARBOCRAWL_RESULTS table : " + my_request
    print"Page-type predictors : "+ ', '.join(catPred)
    print"Semantic predictors : " + ', '.join(semPred)
    

    # fetching training data from Cdiscount-maison
    my_filtered_request = "select url, whole_text, title, h1, short_description, status_code, depth, outlinks_size, inlinks_size, nb_breadcrumbs, nb_aggregated_ratings, nb_ratings_values, nb_prices, nb_availabilities, nb_reviews, nb_reviews_count, nb_images, nb_search_in_url, nb_add_in_text, nb_filter_in_text, nb_search_in_text, nb_guide_achat_in_text, nb_product_info_in_text, nb_livraison_in_text, nb_garanties_in_text, nb_produits_similaires_in_text, nb_images_text, width_average, height_average, page_rank, page_type, concurrent_name, last_update, semantic_hits, semantic_title, inlinks_semantic, inlinks_semantic_count  from arbocrawl_results  where page_type !='Unknown' and concurrent_name = (%s) "; 
    cursor.execute(my_filtered_request,("Cdiscount-maison",)); 
    # retrieve the records from the database
    records = cursor.fetchall()
    url_list = [item[0] for item in records];
    semantic_list =  [(item[1],item[2],item[3],item[4],item[33],item[34],item[35]) for item in records];
    predictor_list = [(item[6],item[7],item[8],item[9],item[10],item[11],item[12],item[13],item[14],item[15],item[16],item[17],item[18],item[19],item[20],item[21],item[22],item[23],item[24],item[25],item[26],item[27],item[28]) for item in records];
    output_list    = [item[30] for item in records];
    y=[assign_enumerated_value(output) for output in output_list]
    X= np.asanyarray(predictor_list);
    y= np.asanyarray(y);
    print type(X)
    print X.shape
    print type(y)
    print y.shape
    
    # fetching the data to predict
    my_to_predict_request = "select url, whole_text, title, h1, short_description, status_code, depth, outlinks_size, inlinks_size, nb_breadcrumbs, nb_aggregated_ratings, nb_ratings_values, nb_prices, nb_availabilities, nb_reviews, nb_reviews_count, nb_images, nb_search_in_url, nb_add_in_text, nb_filter_in_text, nb_search_in_text, nb_guide_achat_in_text, nb_product_info_in_text, nb_livraison_in_text, nb_garanties_in_text, nb_produits_similaires_in_text, nb_images_text, width_average, height_average, page_rank, page_type, concurrent_name, last_update, semantic_hits, semantic_title, inlinks_semantic, inlinks_semantic_count  from arbocrawl_results  where concurrent_name != (%s) "; 
    cursor.execute(my_to_predict_request,("Cdiscount-maison",)); 
    # retrieve the records from the database
    records_to_validate = cursor.fetchall()
    url_to_validate_list = [item[0] for item in records_to_validate];
    semantic_to_validate_list =  [(item[1],item[2],item[3],item[4],item[33],item[34],item[35]) for item in records_to_validate];
    predictor_to_validate_list = [(item[6],item[7],item[8],item[9],item[10],item[11],item[12],item[13],item[14],item[15],item[16],item[17],item[18],item[19],item[20],item[21],item[22],item[23],item[24],item[25],item[26],item[27],item[28]) for item in records_to_validate];
    output_to_validate_list    = [item[30] for item in records_to_validate];
    
    Xval= np.asanyarray(predictor_to_validate_list);
    print type(Xval)
    print Xval.shape
    # we must filter the NaN / Infinity values out of Xval here; indexing
    # with the full 2-D mask would flatten the array, so drop bad rows instead
    Xval = Xval[~np.isnan(Xval).any(axis=1)]
    print Xval.shape
 
    # transforming the predictors / rescaling the predictors
    # we don't need to do that
    #X = StandardScaler().fit_transform(X)
    #Xval = StandardScaler().fit_transform(Xval)
    
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
    single_tree = DecisionTreeClassifier(max_depth=5)
    single_tree.fit(X_train, y_train)  # the original also fit on (X, output_list) first, which this fit immediately overwrote
    single_tree_score = single_tree.score(X_test, y_test)
    print "Single tree score " + str(single_tree_score)
    
    random_forest = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
    random_forest.fit(X_train, y_train)
    random_forest_score = random_forest.score(X_test, y_test)
    print "Random forest score " + str(random_forest_score)
    
    kneighbors =  KNeighborsClassifier(3)
    kneighbors.fit(X_train, y_train)
    kneighbors_score = kneighbors.score(X_test, y_test)
    print "K-Neighbors score " + str(kneighbors_score)
    
    adaboost =  AdaBoostClassifier()
    adaboost.fit(X_train, y_train)
    adaboost_score = adaboost.score(X_test, y_test)
    print "Ada boost score " + str(adaboost_score)

    gaussian_nb =  GaussianNB()
    gaussian_nb.fit(X_train, y_train)
    gaussian_nb_score = gaussian_nb.score(X_test, y_test)
    print "gaussian mixtures score " + str(gaussian_nb_score)
    
    lda =  LDA()
    lda.fit(X_train, y_train)
    lda_nb_score = lda.score(X_test, y_test)
    print "linear discriminant score " + str(lda_nb_score)
    
    qda =  QDA()
    qda.fit(X_train, y_train)
    qda_nb_score = qda.score(X_test, y_test)
#......... (part of the code omitted here) .........
Author: sduprey, Project: PYTHON_WEB, Lines: 103, Source: data_fetching.py

Example 12: StandardScaler

# Required import: from sklearn.qda import QDA [as alias]
# Or: from sklearn.qda.QDA import score [as alias]
    
from sklearn.lda import LDA
from sklearn.qda import QDA
from sklearn.neighbors import KNeighborsClassifier
Xtest = test_df.ix[:,'x.1':'x.10'].values
ytest = test_df.ix[:,'y'].values
X_std_test = StandardScaler().fit_transform(Xtest)

lda_model = LDA()
lda_model.fit(X_std,y)
print lda_model.score(X_std,y)
print lda_model.score(X_std_test,ytest)

qda_model = QDA()
qda_model.fit(X_std,y)
print qda_model.score(X_std,y)
print qda_model.score(X_std_test,ytest)

knn_model = KNeighborsClassifier(n_neighbors=10)
knn_model.fit(X_std,y)
print knn_model.score(X_std,y)
print knn_model.score(X_std_test,ytest)

plt.figure(figsize=(12, 8))
train_scores = []
test_scores = []
for k in range(1,21):
    knn_model = KNeighborsClassifier(n_neighbors=k)
    knn_model.fit(X_std,y)
    train_scores += [knn_model.score(X_std,y)]
    test_scores += [knn_model.score(X_std_test,ytest)]
Author: juanfel, Project: tarea3-analisis-inteligente-de-datos, Lines: 33, Source: parte_1.py
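
A natural hedged continuation of the loop above (the excerpt is truncated) is to plot the collected k-NN accuracy curves; plt is already bound to matplotlib.pyplot in the source file, as the plt.figure call shows.

# Plot the train/test accuracy curves gathered in the loop above
plt.plot(range(1, 21), train_scores, label='train accuracy')
plt.plot(range(1, 21), test_scores, label='test accuracy')
plt.xlabel('n_neighbors (k)')
plt.ylabel('accuracy')
plt.legend()
plt.show()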


Note: the sklearn.qda.QDA.score method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.