当前位置: 首页>>代码示例>>Python>>正文


Python DecisionTreeClassifier.score方法代码示例

本文整理汇总了Python中sklearn.tree.DecisionTreeClassifier.score方法的典型用法代码示例。如果您正苦于以下问题:Python DecisionTreeClassifier.score方法的具体用法?Python DecisionTreeClassifier.score怎么用?Python DecisionTreeClassifier.score使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.tree.DecisionTreeClassifier的用法示例。


在下文中一共展示了DecisionTreeClassifier.score方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: dt_results

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def dt_results():
	print "--------------DecisionTreeClassifier-----------------"
	rang = [None, 10, 20, 50, 100, 200, 400]
	
	print "--------------With HOG-----------------"
	ans = []
	print "maxDepth	Accuracy"
	for i in rang:
		clf = DecisionTreeClassifier(max_depth=i)
		clf.fit(X_train_hog, y_train)
		mean_accuracy = clf.score(X_test_hog, y_test)
		print i, "	", mean_accuracy
		ans.append('('+str(i)+", "+str(mean_accuracy)+')')
	print ans
	
	print "\n--------------Without HOG-----------------"
	ans = []
	print "maxDepth	Accuracy"
	for i in rang:
		clf = DecisionTreeClassifier(max_depth=i)
		clf.fit(X_train, y_train)
		mean_accuracy = clf.score(X_test, y_test)
		print i, "	", mean_accuracy
		ans.append('('+str(i)+", "+str(mean_accuracy)+')')
	print ans
开发者ID:vickianand,项目名称:object-classification-for-surveillance,代码行数:27,代码来源:test_classifiers.py

示例2: test_bootstrap_samples

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def test_bootstrap_samples():
    """Bootstrapping the bagged trees must break the perfect training fit
    that a single full-sample tree achieves."""
    data, labels = make_imbalance(iris.data, iris.target,
                                  ratio={0: 20, 1: 25, 2: 50},
                                  random_state=0)
    data_train, data_test, labels_train, labels_test = train_test_split(
        data, labels, random_state=0)

    reference_tree = DecisionTreeClassifier().fit(data_train, labels_train)
    reference_accuracy = reference_tree.score(data_train, labels_train)

    # No bootstrap and resampling disabled (empty ratio dict): every tree
    # sees the full training set, so the ensemble matches the single
    # tree's perfect training accuracy.
    no_bootstrap_ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=False,
        n_estimators=10,
        ratio={},
        random_state=0).fit(data_train, labels_train)

    assert (no_bootstrap_ensemble.score(data_train, labels_train) ==
            reference_accuracy)

    # With bootstrapping each tree trains on a resample, so the ensemble
    # can no longer be perfect on the training set.
    bootstrapped_ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=True,
        random_state=0).fit(data_train, labels_train)

    assert (bootstrapped_ensemble.score(data_train, labels_train) <
            reference_accuracy)
开发者ID:glemaitre,项目名称:imbalanced-learn,代码行数:33,代码来源:test_classifier.py

示例3: main

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def main():
    """Titanic survival prediction with an entropy decision tree.

    Compares accuracy before and after chi-squared percentile feature
    selection, sweeps the kept-feature percentile with 5-fold
    cross-validation, plots the curve, and re-evaluates with the best
    percentile on the test set.
    """
    titanic = pandas.read_csv('dataset/titanic.csv')

    # Separate the feature columns from the prediction target.
    x_set = titanic.drop(['row.names', 'name', 'survived'], axis=1)
    y_set = titanic['survived']
    # Impute missing values with the mean age (note: applied to all columns).
    x_set.fillna(x_set['age'].mean(), inplace=True)

    x_train, x_test, y_train, y_test = utils.prepare_train_and_test_sets(x_set, y_set)

    # Vectorize the categorical features into numeric columns.
    dict_vectorizer = DictVectorizer()
    x_train = dict_vectorizer.fit_transform(x_train.to_dict(orient='record'))
    x_test = dict_vectorizer.transform(x_test.to_dict(orient='record'))
    print(dict_vectorizer.feature_names_)
    print("=" * 100)

    decision_tree_classifier = DecisionTreeClassifier(criterion='entropy')
    utils.get_trained_result(decision_tree_classifier, x_test, x_train, y_test, y_train)

    # Keep only the top 20% of features ranked by chi-squared score.
    select_percentile = feature_selection.SelectPercentile(feature_selection.chi2, percentile=20)
    x_train_fs = select_percentile.fit_transform(x_train, y_train)
    x_test_fs = select_percentile.transform(x_test)
    decision_tree_classifier.fit(x_train_fs, y_train)
    print(decision_tree_classifier.score(x_test_fs, y_test))

    # Cross-validate while sweeping the kept-feature percentile at fixed
    # intervals; the curve is plotted further below.
    percentiles = range(1, 100, 2)
    results = []

    for i in percentiles:
        new_fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=i)
        x_train_fs = new_fs.fit_transform(x_train, y_train)
        scores = cross_val_score(decision_tree_classifier, x_train_fs, y_train, cv=5)
        results = numpy.append(results, scores.mean())

    print(results)

    # Find the percentile that achieved the best cross-validated accuracy.
    opt = numpy.where(results == results.max())[0]
    best_percentile = percentiles[opt[0]]
    print('Optimal number of features: {}'.format(best_percentile))

    # Plot accuracy versus percentile of features kept.
    pylab.plot(percentiles, results)
    pylab.xlabel('percentiles of feature')
    pylab.ylabel('accuracy')
    pylab.show()

    # Re-evaluate an identically configured model on the test set using
    # the best feature subset found above.
    best_fs = feature_selection.SelectPercentile(feature_selection.chi2, percentile=best_percentile)
    x_train_fs = best_fs.fit_transform(x_train, y_train)
    decision_tree_classifier.fit(x_train_fs, y_train)
    x_test_fs = best_fs.transform(x_test)
    print('new score: {}'.format(decision_tree_classifier.score(x_test_fs, y_test)))
开发者ID:ACEGuiPeng,项目名称:kaggle_demo_tests,代码行数:57,代码来源:example_titanic.py

示例4: main

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def main():

    # generate synthetic binary classification data
    # (name refers to example 10.2 in ESL textbook...see refs below)
    X, y = make_hastie_10_2()

    # perform train/test split (no need to shuffle)
    split_pt = int(TRAIN_PCT * len(X))
    X_train, X_test = X[:split_pt], X[split_pt:]
    y_train, y_test = y[:split_pt], y[split_pt:]

    # single dec stump
    stump_clf = DecisionTreeClassifier(
        max_depth=1)
    stump_clf.fit(X_train, y_train)
    stump_score = round(stump_clf.score(X_test, y_test), 3)
    print 'decision stump acc = {}\t(max_depth = 1)'.format(stump_score)

    # single dec tree (max_depth=3)
    tree_clf = DecisionTreeClassifier(max_depth=3)
    tree_clf.fit(X_train, y_train)
    tree_score = round(tree_clf.score(X_test, y_test), 3)
    print 'decision tree acc = {}\t(max_depth = 5)\n'.format(tree_score)

    # gbt: a powerful ensemble technique
    gbt_scores = list()
    for k in (10, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500):
        print 'fitting gbt for n_estimators = {}...'.format(k)

        gbt_clf = GradientBoostingClassifier(
            n_estimators=k,         # number of weak learners for this iteration
            max_depth=1,            # weak learners are dec stumps
            learning_rate=1.0)      # regularization (shrinkage) hyperparam

        gbt_clf.fit(X_train, y_train)
        gbt_scores.append(round(gbt_clf.score(X_test, y_test), 3))

    print '\ngbt accuracy =\n{}\n'.format(gbt_scores)

    # stochastic gbt (using subsampling)
    sgbt_scores = list()
    for k in (10, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500):
        print 'fitting sgbt for n_estimators = {}...'.format(k)

        sgbt_clf = GradientBoostingClassifier(
            n_estimators=k,         # number of weak learners for this iteration
            max_depth=1,            # weak learners are dec stumps
            subsample=0.5,          # % of training set used by each bc
            learning_rate=1.0)      # regularization (shrinkage) hyperparam

        sgbt_clf.fit(X_train, y_train)
        sgbt_scores.append(round(sgbt_clf.score(X_test, y_test), 3))

    print '\nsgbt accuracy =\n{}'.format(sgbt_scores)
开发者ID:Adusei,项目名称:science,代码行数:56,代码来源:gbt.py

示例5: DTclassifier

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def DTclassifier(hog_train_x,train_y,hog_test_x,test_y):



    print 'Accuracy for decision tree classifier on test data: '
   

    clf = DecisionTreeClassifier()
    clf.fit(hog_train_x,train_y)

    print clf.score(hog_test_x,test_y)
开发者ID:aayushmudgal,项目名称:CS771-MLT,代码行数:13,代码来源:makedata.py

示例6: runDecisionTreeSimulation

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def runDecisionTreeSimulation(dataTrain, dataTest, dataHold, train_tfidf, test_tfidf, hold_tfidf):
    print 'running decision tree'
    outFile = open('decisionTreeLog.txt','a')

    outFile.write('train==> %d, %d \n'%(train_tfidf.shape[0],train_tfidf.shape[1]))
    outFile.write('test==>  %d, %d \n'%(test_tfidf.shape[0],test_tfidf.shape[1]))
    with SimpleTimer('time to train', outFile):
        clf = DecisionTreeClassifier().fit(train_tfidf, dataTrain.target)
    
    baseScore = clf.score(test_tfidf, dataTest.target)
    initHeight = clf.tree_.max_depth
    print 'baseline score %.3f base height %d' % (baseScore, initHeight)
    outFile.write('baseline score %.3f base height %d \n' % (baseScore, initHeight))
    
    
    res = []
    with SimpleTimer('time to prune', outFile):
        for height in range(initHeight, 40, -25):
#             print 'training for height %d' % height
            clf = DecisionTreeClassifier(max_depth=height).fit(train_tfidf, dataTrain.target)
            score = clf.score(hold_tfidf, dataHold.target)
            res.append((score, height))
            outFile.write('%d %.3f \n' % (height, score))
    res = sorted(res, key=lambda x:x[0], reverse=True)
    print res[:5]
    
    bestDepth = res[0][1]
    print ('best height is %d' % bestDepth)
    outFile.write('best depth is %d  and score is %.3f \n' % (bestDepth, res[0][0]))
        
    bestClf = DecisionTreeClassifier(max_depth=bestDepth)
    bestClf.fit(train_tfidf, dataTrain.target)
    
    predicted = bestClf.predict(test_tfidf)
    
    train_predict = bestClf.predict(train_tfidf)
    
    print 'testing score'
    outFile.write('testing score')
    outputScores(dataTest.target, predicted, outFile)
    print 'training score'
    outFile.write('testing score')
    outputScores(dataTrain.target, train_predict, outFile)
    
    results = predicted == dataTest.target
    wrong = []
    for i in range(len(results)):
        if not results[i]:
            wrong.append(i)
    print 'classifier got these wrong:'
    for i in wrong[:10]:
        print dataTest.data[i], dataTest.target[i]
        outFile.write('%s %d \n' % (dataTest.data[i], dataTest.target[i]))
    plot_learning_curve(bestClf, 'decision tree after pruning from %d to %d depth' % (initHeight, bestDepth), train_tfidf, dataTrain.target, cv=5, n_jobs=4)
开发者ID:anantauprety,项目名称:sentiment-analysis,代码行数:56,代码来源:decision_tree.py

示例7: arbre_decision_vecteur

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def arbre_decision_vecteur():
    """Interpret images as pixel vectors and classify them with a decision tree.

    Sweeps image size (npix), max_depth, min_samples_leaf and the split
    criterion (gini/entropy), keeping the best test score, and prints a
    summary line for the winning configuration.
    """
    # best = [score, max_depth, min_samples_leaf, npix, fit+score seconds, criterion index]
    best = np.zeros(6)
    nom = ["gini","entr"]
    depths = [10,200,1000]

    for npix in range(50,200,50):
        _, data, target, _ = utils.chargementVecteursImages(mer,ailleurs,1,-1,npix)
        # NOTE(review): random.seed() returns None, so random_state is
        # effectively None (non-deterministic split/tree) -- confirm intended.
        X_train,X_test,Y_train,Y_test=train_test_split(data,target,test_size=0.3,random_state=random.seed())

        # Hoisted out of the depth/leaf loops: flattening the image vectors
        # depends only on the split, yet the original recomputed these four
        # lines twice per (depth, leaf) combination.
        x1 = np.array(X_train)
        x1 = np.reshape(x1, (x1.shape[0], x1.shape[2]))
        x2 = np.array(X_test)
        x2 = np.reshape(x2, (x2.shape[0], x2.shape[2]))

        for d in depths:
            for m in range(1, 5, 2):
                # Deduplicated: the original repeated the whole fit/score/best
                # bookkeeping once per criterion with identical code.
                for crit_index, criterion in enumerate(["gini", "entropy"]):
                    start_time = time.time()
                    ad = DecisionTreeClassifier(criterion=criterion, max_depth=d, min_samples_leaf=m, random_state=random.seed(), presort=True)
                    ad.fit(X=x1, y=Y_train)
                    score = ad.score(x2, Y_test)
                    end_time = time.time()

                    if score > best[0]:
                        best[0] = score
                        best[1] = d
                        best[2] = m
                        best[3] = npix
                        best[4] = end_time - start_time
                        best[5] = crit_index

    print("| Arbre de décision ({})       | V.Pix {:4.0f} | prof max={:4.0f}, elts par feuille={:2.0f}   | {:10.3f}ms | {:1.3f} |".format(nom[int(best[5])],best[3],best[1],best[2],best[4]*1000,best[0]))
开发者ID:laiaga,项目名称:TPSM1,代码行数:54,代码来源:classification_images.py

示例8: fitting

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
    def fitting(self,model_name):
        """Fit the model named by ``model_name`` on self.train_set / self.Y.

        Supported names: 'LogReg', 'PolySVC', 'rbfSVC', 'decTree',
        'randForr', 'KNear'. Returns ``[model_name, mod_score, mod_fit]``
        where mod_score is accuracy on the first ~20% of the training rows
        (an in-sample sanity score, not a held-out validation score).

        NOTE(review): an unrecognized model_name leaves mod_fit/mod_score
        unbound and raises NameError at the final print -- confirm callers
        only pass the six names above.
        """
        #number of rows taken to calculate score of each model (20% of train sample length)
        #(Python 2 integer division keeps score_len an int)
        score_len=1+len(self.train_set[:,0])*20/100
        t1=time.time() #stores start time for this model
        if model_name == 'LogReg':
            from sklearn import linear_model
            mod_fit=linear_model.LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1)
            mod_fit.fit(self.train_set,self.Y)
            #print score_len, len(self.train_set[:,1]), self.train_set[score_len,:]
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])


        elif model_name == 'PolySVC':
            from sklearn import svm
            mod_fit=svm.SVC(C=1.0,kernel='poly', degree=5, probability=True, shrinking=True,  tol=0.01, cache_size=1000)
            mod_fit.fit(self.train_set,self.Y)
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])

        elif model_name == 'rbfSVC':
            from sklearn import svm
            mod_fit=svm.SVC(kernel='rbf', probability=True, shrinking=True,  tol=0.01, cache_size=1000)
            mod_fit.fit(self.train_set,self.Y)
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])

        elif model_name == 'decTree':
            from sklearn.tree import DecisionTreeClassifier
            #minimum split size (for the node) is 2 (for small sets) + (no. of observations)/(10*no of features)
            min_splitsize=2+len(self.train_set[:,0])/(10*len(self.train_set[0,:])) #not the most professional stop criterion
            mod_fit=DecisionTreeClassifier(criterion='gini', splitter='best', min_samples_split=min_splitsize)
            mod_fit.fit(self.train_set,self.Y)
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])

        elif model_name == 'randForr':
            from sklearn.ensemble import RandomForestClassifier
            #minimum split size (for the node) is 2 (for small sets) + (no. of observations)/(10*no of features)
            min_splitsize=2+len(self.train_set[:,0])/(10*len(self.train_set[0,:])) #not the most professional stop criterion
            mod_fit=RandomForestClassifier(n_estimators=25, criterion='gini', min_samples_split=min_splitsize)
            mod_fit.fit(self.train_set,self.Y)
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])

        elif model_name == 'KNear':
            from sklearn.neighbors import KNeighborsClassifier
            #no of neighbors is 2 (for small sets) + (no. of observations)/(10*no of features)
            n_near=2+len(self.train_set[:,0])/(10*len(self.train_set[0,:])) #not the most professional criterion:D
            mod_fit=KNeighborsClassifier(n_neighbors=n_near)
            mod_fit.fit(self.train_set,self.Y)
            mod_score=mod_fit.score(self.train_set[:score_len,:],self.Y[:score_len])

        #print message when fitting is done
        print 'model {0} fitted in {1} sec. with fitting score {2}\n'.format(model_name,time.time()-t1,mod_score)
        #returns list of model name, score on 20% of train sample and fitted model object
        return [model_name, mod_score, mod_fit]
开发者ID:nyszom,项目名称:Criteo_Kaggle_example,代码行数:54,代码来源:nyszom_models.py

示例9: main

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def main():
    """POS-tagging baseline: compare a decision tree on raw word indices
    against a custom LogisticRegression model, reporting accuracy and the
    mean per-class F1 on both the train and test sets.

    NOTE(review): LogisticRegression here accepts fit(..., V=V) and exposes
    f1_score, so it is a project-local class, not sklearn's -- see imports.
    """
    Xtrain, Ytrain, Xtest, Ytest, word2idx = get_data()

    # convert to numpy arrays
    Xtrain = np.array(Xtrain)
    Ytrain = np.array(Ytrain)

    # convert Xtrain to indicator matrix
    # (V = vocabulary size + 1 to leave room for an out-of-vocab index)
    N = len(Xtrain)
    V = len(word2idx) + 1
    print "vocabulary size:", V
    # Xtrain_indicator = np.zeros((N, V))
    # Xtrain_indicator[np.arange(N), Xtrain] = 1

    # decision tree
    dt = DecisionTreeClassifier()

    # without indicator: each sample is the single word index, so the tree
    # effectively learns a per-word most-frequent-tag lookup
    dt.fit(Xtrain.reshape(N, 1), Ytrain)
    print "dt train score:", dt.score(Xtrain.reshape(N, 1), Ytrain)
    p = dt.predict(Xtrain.reshape(N, 1))
    print "dt train f1:", f1_score(Ytrain, p, average=None).mean()

    # with indicator -- too slow!!
    # dt.fit(Xtrain_indicator, Ytrain)
    # print "dt score:", dt.score(Xtrain_indicator, Ytrain)

    # train and score
    model = LogisticRegression()
    model.fit(Xtrain, Ytrain, V=V)
    print "training complete"
    print "lr train score:", model.score(Xtrain, Ytrain)
    print "lr train f1:", model.f1_score(Xtrain, Ytrain)


    Ntest = len(Xtest)
    Xtest = np.array(Xtest)
    Ytest = np.array(Ytest)
    # convert Xtest to indicator
    # Xtest_indicator = np.zeros((Ntest, V))
    # Xtest_indicator[np.arange(Ntest), Xtest] = 1

    # decision tree test score
    print "dt test score:", dt.score(Xtest.reshape(Ntest, 1), Ytest)
    p = dt.predict(Xtest.reshape(Ntest, 1))
    print "dt test f1:", f1_score(Ytest, p, average=None).mean()
    # print "dt test score:", dt.score(Xtest_indicator, Ytest) # too slow!

    # logistic test score -- too slow!!
    print "lr test score:", model.score(Xtest, Ytest)
    print "lr test f1:", model.f1_score(Xtest, Ytest)

示例10: decision_tree_train

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def decision_tree_train(X_train, X_test, y_train, y_test, depth=10):
	print "Running Decision Tree Classifier to max depth of", depth
	depths = np.arange(1,11)
	train_errors = []
	test_errors = []
	for d in depths:
		clf = DecisionTreeClassifier(max_depth=depth).fit(X_train, y_train)
		train_error = 1.0 - clf.score(X_train, y_train)
		test_error = 1.0 - clf.score(X_test, y_test)
		print "depth:", d, "train score:", 1.0 - train_error, "test score:", 1.0 - test_error
		train_errors.append(train_error)
		test_errors.append(test_error)

	# plot
	bar_plot(depths, train_errors, test_errors, "Decision Tree Classifier", "depths")
开发者ID:ismailmustafa,项目名称:predictingRefugeeAsylum-avengers,代码行数:17,代码来源:main.py

示例11: DecisionTreecls

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
class DecisionTreecls(object):
    """Thin wrapper around a depth-5 sklearn DecisionTreeClassifier.

    Any error inside a method is printed via traceback and swallowed, so
    every method returns None on failure (deliberate best-effort design).
    """
    def __init__(self):
        self.decision_tree_cls = DecisionTreeClassifier(max_depth=5)
        self.prediction = None
        self.train_x = None
        self.train_y = None
        # Fix: previously only ever set inside predict(), so calling
        # accuracy_score() first raised AttributeError (swallowed by the
        # except). Initialize it here like the other attributes.
        self.test_x = None

    def train_model(self, train_x, train_y):
        """Remember the training data and fit the underlying tree."""
        try:
            self.train_x = train_x
            self.train_y = train_y
            self.decision_tree_cls.fit(train_x, train_y)
        except:
            print(traceback.format_exc())

    def predict(self, test_x):
        """Predict labels for test_x, caching both the input and output."""
        try:
            self.test_x = test_x
            self.prediction = self.decision_tree_cls.predict(test_x)
            return self.prediction
        except:
            print(traceback.format_exc())

    def accuracy_score(self, test_y):
        """Return mean accuracy of the last predict() input against test_y.

        NOTE(review): relies on predict() having been called first to set
        self.test_x -- confirm callers respect that ordering.
        """
        try:
            return self.decision_tree_cls.score(self.test_x, test_y)
        except:
            print(traceback.format_exc())
开发者ID:obaid22192,项目名称:machine-learning,代码行数:32,代码来源:classifiers.py

示例12: adaboost

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def adaboost(X_train, X_test, y_train, y_test, M=10):
	"""Run M rounds of AdaBoost with depth-3 decision trees as weak
	learners and plot the ensemble's train/test error per round.

	NOTE(review): weights are only increased for misclassified samples and
	never renormalized; clf.score(..., sample_weight=weights) normalizes
	internally so the weighted error stays in [0, 1], but a perfect weak
	learner (error == 0) would divide by zero in the alpha computation --
	confirm that is acceptable for the intended data.
	"""

	# start from uniform sample weights
	weights = np.ones(y_train.shape[0])/y_train.shape[0]
	alphas = []
	weak_classifiers = []

	train_errors = []
	test_errors = []

	rounds = np.arange(1,M+1)
	for i in range(M):
		# weak classifier trained under the current sample weights
		clf = DecisionTreeClassifier(max_depth=3).fit(X_train, y_train, sample_weight=weights)
		weak_classifiers.append(clf)

		# calculate weighted training error of this round's learner
		error = 1 - clf.score(X_train, y_train, sample_weight=weights)

		# calculate alpha (this learner's vote in the ensemble)
		alpha = np.log((1 - error)/error)
		weak_classifiers и alphas.append(alpha)

		# adjust weights: up-weight the samples this round got wrong
		p = clf.predict(X_train)
		for j in range(len(weights)):
			if p[j] != y_train[j]:
				weights[j] = (weights[j] * np.exp(alpha))

		# ensemble error after i+1 rounds, using all weak learners so far
		train_error = adaboost_error(X_train, y_train, alphas, weak_classifiers)
		test_error  = adaboost_error(X_test, y_test, alphas, weak_classifiers)
		train_errors.append(train_error)
		test_errors.append(test_error)
		print "num rounds:", i+1, "train score:", 1.0 - train_error, "test score:", 1.0 - test_error

	train_test_score_plot(rounds, train_errors, test_errors, "Adaboost Classifier", "num rounds")
开发者ID:ismailmustafa,项目名称:predictingRefugeeAsylum-avengers,代码行数:37,代码来源:main.py

示例13: SingleDecisionTreeClassifier

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def SingleDecisionTreeClassifier(pix):
	"""Train and score one DecisionTreeClassifier on HOG features of MNIST.

	pix: side length of the HOG pixels_per_cell window.
	Returns the classifier's accuracy on the test set, printing timing for
	the HOG extraction, training and testing phases along the way.
	Relies on module-level training_image_data / training_label_data /
	testing_image_data / testing_label_data.
	"""
	print "\nCreating HOG Dataset from MNIST Data"
	start_time = time.time()
	# 9-orientation HOG with (pix, pix) cells and 3x3 blocks, per image.
	training_image_data_hog = [hog(img, orientations=9, pixels_per_cell=(pix,pix), cells_per_block=(3, 3))
					for img in training_image_data]
	testing_image_data_hog = [hog(img, orientations=9, pixels_per_cell=(pix, pix), cells_per_block=(3, 3))
					for img in testing_image_data]
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to make the HOG Images"

	print '\nTraining data'
	start_time = time.time()
	single_decision_tree_classifier = DecisionTreeClassifier()
	single_decision_tree_classifier.fit(training_image_data_hog, training_label_data)
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to train the classifier"
	print 'Training Completed'

	print '\nTesting data '
	start_time = time.time()
	single_decision_tree_classifier_accuracy = single_decision_tree_classifier.score(testing_image_data_hog, testing_label_data)
	end_time = time.time() - start_time
	print "It took "+ str(end_time) + " to test the data "
# 
	print '\n# printing Accuracy'
	print "\nTesting for Single Decision Tree Classifier with pixels per cell = ("+str(pix)+','+str(pix)+') :'
	print "-------------------------------------------------"
	print "\nSingleDecisionTreeClassifier accuracy for ("+str(pix)+','+str(pix)+") : "+ str(single_decision_tree_classifier_accuracy)

	return single_decision_tree_classifier_accuracy
开发者ID:luckysahani,项目名称:Machine-Learning-Decision-trees-and-forests,代码行数:32,代码来源:1.py

示例14: main

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
def main():
    """Distinguish one driver's trips from other drivers' trips.

    Usage: script <target_driver_dir> <first_other_driver_index>.
    Builds feature matrices from trip files under /home/joe/drivers/, then
    prints test accuracy for a RandomForest, a DecisionTree, an SVC and an
    ensemble of GradientBoosting classifiers.
    """
    location = "/home/joe/drivers/"
    z = sys.argv[1]
    k = int(sys.argv[2])
    trips = read_trips(location + z)
    fmatrix = feature_matrix(trips)
    n_rows, n_comps = fmatrix.shape
    # 90/10 train/test split for the target driver; its rows are labeled 1.
    train_fmatrix = fmatrix[: int(len(fmatrix) * 0.9)]
    train_targets = np.ones(len(train_fmatrix))
    test_fmatrix = fmatrix[int(len(fmatrix) * 0.9) :]
    test_targets = np.ones(len(test_fmatrix))
    targets = np.ones(len(fmatrix))
    i = k
    j = 0
    # Stack in up to 20 "other" drivers as negative examples, skipping
    # missing directories; j caps the scan at 10000 attempts.
    while i < (k + 20) and j < 10000:
        j += 1
        try:
            trips = read_trips(location + str(i))
            fm = feature_matrix(trips)
            train_fm = fm[:9]
            test_fm = fm[9:10]
            train_fmatrix = np.vstack((train_fmatrix, train_fm))
            test_fmatrix = np.vstack((test_fmatrix, test_fm))
            fmatrix = np.vstack((fmatrix, fm))

            # NOTE(review): np.zeros(...) * i is still all zeros, so every
            # non-target driver gets label 0 (binary setup). np.ones(...) * i
            # was probably intended for a multi-class setup -- confirm
            # before changing.
            targets = np.hstack((targets, np.zeros(len(fm)) * i))
            train_targets = np.hstack((train_targets, np.zeros(len(train_fm)) * i))
            test_targets = np.hstack((test_targets, np.zeros(len(test_fm)) * i))
            print(i)
            i += 1
        except IOError:
            pass

    pipeline = Pipeline([("scale", StandardScaler()), ("ICA", PCA(n_components=50))])
    pipeline.fit(fmatrix)
    # NOTE(review): these transformed matrices are never used below -- the
    # classifiers train on the raw features. Confirm whether the pipeline
    # output was meant to replace train_fmatrix/test_fmatrix.
    train_trans = pipeline.transform(train_fmatrix)
    test_trans = pipeline.transform(test_fmatrix)

    # Bug fix: num_t_targets was referenced without ever being defined
    # (NameError at runtime). It is taken to mean the number of positive
    # (target-driver) rows in the test set; float() keeps the division
    # fractional on both Python 2 and 3.
    num_t_targets = np.sum(test_targets == 1)
    print("point teng", num_t_targets / float(len(test_targets)))
    gb = RandomForestClassifier(n_estimators=50)
    gb.fit(train_fmatrix, train_targets)
    gb_score = gb.score(test_fmatrix, test_targets)
    print("gb", gb_score)
    dt = DecisionTreeClassifier()
    dt.fit(train_fmatrix, train_targets)
    dt_score = dt.score(test_fmatrix, test_targets)
    print("dt", dt_score)
    svc = SVC()
    svc.fit(train_fmatrix, train_targets)
    svc_score = svc.score(test_fmatrix, test_targets)
    print("svc", svc_score)
    # rfs = [RandomForestClassifier(n_estimators=40) for i in range(20)]
    rfs = [GradientBoostingClassifier(n_estimators=100) for i in range(20)]
    scores = []
    for i, rf in enumerate(rfs):
        rf.fit(train_fmatrix, train_targets)
        scores.append(rf.score(test_fmatrix, test_targets))
        print(i, scores[i])
    print("average", np.mean(scores))

示例15: rand_forest_train

# 需要导入模块: from sklearn.tree import DecisionTreeClassifier [as 别名]
# 或者: from sklearn.tree.DecisionTreeClassifier import score [as 别名]
    def rand_forest_train(self):
        """Train decision-tree, random-forest and gradient-boosting
        classifiers on user features from names.csv, print each model's
        accuracy and classification report, classify values.csv with the
        random forest, and store the fitted models on self.
        """
        # Load the local user feature data.
        users = pd.read_csv('names.csv')
        # Use similarity, platform, reputation and entropy as the features
        # that discriminate humans from machines.
        X = users[['similarity', 'platform', 'reputation', 'entropy']]
        y = users['human_or_machine']

        # Split the raw data, keeping 25% of the rows for testing.
        from sklearn.cross_validation import train_test_split
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)

        # Vectorize the categorical features into feature vectors.
        from sklearn.feature_extraction import DictVectorizer
        vec = DictVectorizer(sparse=False)
        X_train = vec.fit_transform(X_train.to_dict(orient='record'))
        X_test = vec.transform(X_test.to_dict(orient='record'))

        # Train and predict with a single decision tree.
        from sklearn.tree import DecisionTreeClassifier
        dtc = DecisionTreeClassifier()
        dtc.fit(X_train, y_train)
        dtc_y_pred = dtc.predict(X_test)

        # Train and predict with a random forest ensemble.
        from sklearn.ensemble import RandomForestClassifier
        rfc = RandomForestClassifier()
        rfc.fit(X_train, y_train)
        rfc_y_pred = rfc.predict(X_test)

        # Train and predict with gradient-boosted decision trees.
        from sklearn.ensemble import GradientBoostingClassifier
        gbc = GradientBoostingClassifier()
        gbc.fit(X_train, y_train)
        gbc_y_pred = gbc.predict(X_test)

        from sklearn.metrics import classification_report
        # Report the single decision tree's test accuracy plus detailed
        # precision / recall / F1 metrics.
        print("单一决策树的准确性为", dtc.score(X_test, y_test))
        print(classification_report(dtc_y_pred, y_test))

        # Report the random forest's test accuracy plus detailed metrics.
        print("随机森林分类器的准确性为", rfc.score(X_test, y_test))
        print(classification_report(rfc_y_pred, y_test))

        # Report the gradient-boosted trees' test accuracy plus detailed metrics.
        print("梯度提升决策树的准确性为", gbc.score(X_test, y_test))
        print(classification_report(gbc_y_pred, y_test))


        users = pd.read_csv('values.csv')

        # Classify the new rows as machine or human (random forest only).
        X = users[['similarity', 'platform', 'reputation', 'entropy']]
        X = vec.transform(X.to_dict(orient='record'))
        print(rfc.predict(X))

        self.dtc = dtc
        self.rfc = rfc
        self.gbc = gbc


注:本文中的sklearn.tree.DecisionTreeClassifier.score方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。