

Python SVC.score Method Code Examples

This article collects typical usage examples of the Python method sklearn.svm.SVC.score. If you are unsure what SVC.score does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of sklearn.svm.SVC, the class this method belongs to.


The following presents 15 code examples of the SVC.score method, ordered by popularity by default.
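Before diving into the project examples, here is a minimal, self-contained sketch of a typical SVC.score call. The dataset, split, and hyperparameters are illustrative placeholders (assuming a recent scikit-learn where train_test_split lives in sklearn.model_selection); it is not code taken from any example below.

# Minimal sketch: SVC.score returns the mean accuracy on the given data and labels.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

clf = SVC(kernel="rbf", C=1.0, gamma="scale")
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # mean accuracy on the held-out split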

Example 1: learn

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def learn(dataset):
    # l1 or l2?
    # I want to use logistic regression with liblinear
    # because the dataset is small
    # http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression

    # target = [item[TYPE_INDEX] for item in dataset]
    # training_data = [item[1:TYPE_INDEX] for item in dataset]

    matrix_ds = np.asarray(dataset)
    # clf = linear_model.LogisticRegression()
    # clf = linear_model.Perceptron()
    clf = SVC(kernel="rbf", C=10, gamma=0.1)
    target = matrix_ds[:, TYPE_INDEX]
    training_data = matrix_ds[:, 1:TYPE_INDEX].astype(float)

    # 40% split
    data_train, data_test, target_train, target_test = train_test_split(
        training_data, target, test_size=0.4, random_state=0
    )

    # print target
    # print training_data

    clf.fit(data_train, target_train)
    # scores = cross_val_score(clf, X=data_train, y=target_train)
    # print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    # print len(scores)

    # print data_test, target_test
    print(clf.score(data_test, target_test))
Developer ID: ShivanKaul, Project: Design-Project, Lines of code: 33, Source file: learn.py

Example 2: train_model

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def train_model(X, y, c):
    svm_clf = SVC(kernel='linear', C=c)

    crossvalidation = cross_validation.StratifiedKFold(y, n_folds=5)

    #fit the model
    clfs = []
    cms = []
    train_scores = []
    test_scores = []

    for train, test in crossvalidation:
        X_train, y_train = X[train], y[train]
        X_test, y_test = X[test], y[test]

        X_train, X_test = normalize_features(X_train, X_test)

        svm_clf.fit(X_train, y_train)

        train_score = svm_clf.score(X_train, y_train)
        train_scores.append(train_score)

        test_score = svm_clf.score(X_test, y_test)
        test_scores.append(test_score)

        y_predict = svm_clf.predict(X_test)
        cm = confusion_matrix(y_test, y_predict)
        cms.append(cm)

    return np.mean(test_scores), np.mean(train_scores), np.asarray(cms)
Developer ID: nwang57, Project: genreClassifier, Lines of code: 32, Source file: svm_classifier.py
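Example 2 relies on the old sklearn.cross_validation.StratifiedKFold interface, which later scikit-learn releases moved to sklearn.model_selection with a split(X, y) method. The following is only a hedged sketch of how the same scoring loop might look against the newer API (assuming scikit-learn >= 0.18 and NumPy-array inputs); it is not code from the original project.

# Sketch: the same cross-validated SVC.score loop with sklearn.model_selection.
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC

def train_model_modern(X, y, c, n_splits=5):
    svm_clf = SVC(kernel='linear', C=c)
    test_scores = []
    for train_idx, test_idx in StratifiedKFold(n_splits=n_splits).split(X, y):
        svm_clf.fit(X[train_idx], y[train_idx])
        test_scores.append(svm_clf.score(X[test_idx], y[test_idx]))
    return np.mean(test_scores)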

Example 3: condition_on_grades

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def condition_on_grades(user="c6961489"):
	c = new_conn.cursor()
	models = [None, None, None, None, None, None]
	for i in range(6):
		c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
		x_train = np.array(c.fetchall())
		c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
		y_train = np.array(c.fetchall())[:,0]
		clf = SVC()
		clf.fit(x_train, y_train)
		print(clf.score(x_train, y_train))
		models[i] = clf
	print "====================="
	c.execute('SELECT user_id from (select user_id, count(distinct grade) as cnt from discrete_log group by user_id) where cnt = 6 limit 5')
	users = [row[0] for row in c.fetchall()]
	scores = [0, 0, 0, 0, 0, 0]
	for user in users:
		for i in range(6):
			c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
			x_train = np.array(c.fetchall())
			c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
			y_train = np.array(c.fetchall())[:,0]
			scores[i] += models[i].score(x_train, y_train)
	for i in range(6):
		scores[i] /= len(users)
		print(scores[i])
Developer ID: sramas15, Project: Finding-Mnemosyne, Lines of code: 28, Source file: rep_user.py

Example 4: cvalidate

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def cvalidate():
    from sklearn import cross_validation

    trainset = np.genfromtxt(open('train.csv','r'), delimiter=',')[1:]
    X = np.array([x[1:8] for x in trainset])
    y = np.array([x[8] for x in trainset])
    #print X,y
    import math
    for i, x in enumerate(X):
        for j, xx in enumerate(x):
            if(math.isnan(xx)):
                X[i][j] = 25.6
   
    #print X[0:3]
    #print y[0:3]
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size = 0.3, random_state = 0)

    #X_train, X_test = decomposition_pca(X_train, X_test)
    

    X_train, X_test = decomposition_pca(X_train, X_test)
    c_range = 10.0 ** np.arange(6.5,7.5,1)
    gamma_range = 10.0 ** np.arange(-2.5,0.5,1)
    #parameters = {'kernel':['rbf'], 'C':c_range} 
    parameters = {'kernel':['rbf'], 'C':c_range,  'gamma':gamma_range} 
    svr = SVC(kernel = 'rbf', C = 0.72, gamma = 0.299)

    #clf = grid_search.GridSearchCV(svr, parameters)

    #print clf.estimator
    ##clf = Pipeline([('scale', Scaler()), ('svm', SVC())])

    svr.fit(X_train, y_train)
    print(svr.score(X_test, y_test))
Developer ID: kingr13, Project: entire-src, Lines of code: 36, Source file: svm.py

Example 5: SVCTwoDValidationCurve

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def SVCTwoDValidationCurve(DataTable,Gammavec,Cvec, kernel_,n):
    acc_train, acc_val, trainMinusValidation = [], [], []
    for thisGamma in Gammavec:
        for thisC in Cvec:
            X,classifier= (DataTable[0:n-1,1:].astype(float), DataTable[0:n-1,0].astype(int))
            X_val,classifier_val=(DataTable[n:,1:].astype(float), DataTable[n:,0].astype(int))
            if kernel_=='rbf':
                TrainedModel=SVC(C=thisC, kernel=kernel_, gamma=thisGamma).fit(X,classifier)
            #acc_train.append(TrainedModel.score(X,classifier))
            #acc_val.append(TrainedModel.score(X_val, classifier_val))
            trainMinusValidation.append(TrainedModel.score(X,classifier)-TrainedModel.score(X_val, classifier_val))


    X, Y = np.meshgrid(Cvec,Gammavec)
    #trainMinusValidation=np.array(acc_train)-np.array(acc_val)
    fig=plt.figure()
    #ax=plt.gca()
    #train=ax.scatter(Gammavec,acc_train, color='red')
    #crossval=ax.scatter(Gammavec,acc_val, color='blue')
    #ax.set_yscale('log')
    plt.pcolormesh(X,Y,np.array(trainMinusValidation))
    plt.colorbar() #need a colorbar to show the intensity scale
    plt.xlabel("Value of parameter C")
    plt.ylabel("Value of parameter gamma")
    #plt.legend([train,crossval], ["Training accuracy","Validation accuracy"])
    fig.savefig('validation2DCurve.png')
Developer ID: dnash, Project: PrivateDataAnalysisToys, Lines of code: 28, Source file: importAndPlotData_v2.py

Example 6: run_ml

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def run_ml(nlp_features, actual_shortest_path, is_dev=True):
    (design_matrix, output_labels) = create_design_matrix_and_labels(nlp_features, \
        actual_shortest_path)
    
    #X, X_test, y, y_test = cross_validation,train_test_split(design_matrix, output_labels, test_size=5000, random_state=0)
    #X_train, X_dev, y_train, y_dev = cross_validation,train_test_split(X, y, test_size=5000, random_state=0)

    frac = int(0.15 * len(nlp_features))
    X_test, y_test, X_dev, y_dev, X_train, y_train = split_test_dev_train(design_matrix, output_labels, frac, frac)

    #model = LinearRegression().fit(X_train, y_train)
    #model = SVR().fit(X_train, y_train) # regression
    model = SVC().fit(X_train, y_train) # classification
    #model = OneVsRestClassifier(LogisticRegression()).fit(X_train, y_train)

    score_test_or_dev = None
    score_train = None

    if is_dev:
        score_test_or_dev = model.score(X_dev, y_dev)
    else:
        score_test_or_dev = model.score(X_test, y_test)

    score_train = model.score(X_train, y_train)
    y_predicted_dev = model.predict(X_dev)
    y_actual_dev = y_dev

    f1 = f1_score(y_actual_dev, y_predicted_dev, average='macro')

    #conf_matrix = confusion_matrix(y_actual_dev, y_predicted_dev)

    return (f1, score_test_or_dev, score_train, y_predicted_dev, y_actual_dev)
Developer ID: msushkov, Project: cs224w-wiki, Lines of code: 34, Source file: ml.py

Example 7: quick_train

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def quick_train():
    data = numpy.load(os.path.join(featureroot,"fc6features.npy"))
    #data = numpy.load(os.path.join(featureroot,"pool5features.npy"))
    labels = numpy.load(os.path.join(featureroot,"labels.npy"))

    labelslist = sorted(list(set(labels)))

    #print labelslist

    counter = 0.0
    for label in labelslist:
        labels[labels == label] = counter
        counter += 1

    labels = numpy.reshape(labels,(20000,1)).astype(numpy.float32)
    
    alldata = numpy.hstack((data,labels))

    numpy.random.shuffle(alldata)

    data = alldata[:,0:-1]
    labels = alldata[:,-1].astype(int)

    #classifier = LinearSVC()
    classifier = SVC(C=0.00001,kernel="linear",probability=True)

    classifier.fit(data[0:2000], labels[0:2000])
    print(classifier.score(data[2000:], labels[2000:]))

    joblib.dump(classifier,os.path.join(featureroot,"classifierQuick.pk1"))
Developer ID: Zebreu, Project: SketchingAI, Lines of code: 32, Source file: gendraw.py

Example 8: run

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def run(N):
    X = []
    Y = []
    generate_data(N, X, Y)

    #RBF - runs Lloyd's algorithm for initial random K clusters
    rbf = []
    rbf = My_Rbf(k=12, gamma=1.5)
    rbf.fit(X, Y)
    rbf_Ein = rbf.test(X, Y)

    #SVM - hard margin - C is really infinity
    svc = []
    svc = SVC(C=10000, gamma=1.5, kernel='rbf')
    svc.fit(X, Y)
    svc_Ein = (1.0 - svc.score(X, Y))

    #generate test data
    X_test = []
    Y_test = []
    generate_data(1000, X_test, Y_test)

    #test rbf
    rbf_Eout = rbf.test(X_test, Y_test)

    #test svc
    svc_Eout = (1.0 - svc.score(X_test, Y_test))

    #return results
    return {'rbf_Ein': rbf_Ein, 'svc_Ein': svc_Ein, 'rbf_Eout': rbf_Eout, 'svc_Eout': svc_Eout}
Developer ID: coconaut, Project: machine-learning, Lines of code: 32, Source file: rbf_vs_svm.py

Example 9: cvalidate

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def cvalidate():
    from sklearn import cross_validation

    trainset = np.genfromtxt(open('train','r'), delimiter=' ')
    targetset = np.genfromtxt(open('target','r'), delimiter=' ')
    X = np.array([x[0:64] for x in trainset])
    y = np.array([x for x in targetset])
    #print X,y
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size = 0.3, random_state = 0)

    #X_train, X_test = decomposition_pca(X_train, X_test)
    

    X_train, X_test = decomposition_pca(X_train, X_test)
    c_range = 10.0 ** np.arange(6.5,7.5,1)
    gamma_range = 10.0 ** np.arange(-2.5,0.5,1)
    #parameters = {'kernel':['rbf'], 'C':c_range} 
    parameters = {'kernel':['rbf'], 'C':c_range,  'gamma':gamma_range} 
    svr = SVC(kernel = 'rbf', C = 0.72, gamma = 0.299)

    #clf = grid_search.GridSearchCV(svr, parameters)

    #print clf.estimator
    ##clf = Pipeline([('scale', Scaler()), ('svm', SVC())])

    svr.fit(X_train, y_train)
    print(svr.score(X_test, y_test))
Developer ID: kingr13, Project: entire-src, Lines of code: 29, Source file: svmscore.py

Example 10: SVCLearningCurve

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def SVCLearningCurve(DataTable, params, kernel_):
    acc_train, acc_val, numsamples = [], [], []
    N= np.shape(DataTable)[0]
    #for n in [20*x for x in range(N//30)]:
    for n in [50*x for x in range(N//50)]:
        if n==0:
            continue
        X,classifier= (DataTable[0:n-1,1:].astype(float), DataTable[0:n-1,0].astype(int))
        X_val,classifier_val=(DataTable[n:,1:].astype(float), DataTable[n:,0].astype(int))
        #print classifier
        if kernel_=='linear':
            TrainedModel=SVC(C=params[0], kernel=kernel_).fit(X,classifier)
        if kernel_=='rbf':
            TrainedModel=SVC(C=params[0], kernel=kernel_, gamma=params[1]).fit(X,classifier)
        acc_train.append(TrainedModel.score(X,classifier))
        acc_val.append(TrainedModel.score(X_val, classifier_val))

        numsamples.append(n)

    fig=plt.figure()
    ax=plt.gca()
    train=ax.scatter(numsamples,acc_train, color='red')
    crossval=ax.scatter(numsamples,acc_val, color='blue')
    #ax.set_yscale('log')
    plt.ylabel("Accuracy")
    plt.xlabel("Number of training samples")
    plt.legend([train,crossval], ["Training accuracy","Validation accuracy"], 'lower right')
    fig.savefig('learningCurve.png')
Developer ID: dnash, Project: PrivateDataAnalysisToys, Lines of code: 30, Source file: importAndPlotData_v2.py

Example 11: problem2_3_4

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def problem2_3_4():
    """
    Implement one-vs-all classifiers (one per digit)
    with C = 0.01 and Q = 2, and compare their performance.
    """
    C = 0.01
    Q = 2
    clf = SVC(C=C, kernel="poly", degree=Q, gamma=1, coef0=1)
    Ein = []
    Eout = []
    nSV = []

    # fit 10 classifiers, one for each digit
    for i in range(10):
        y = trainingData[:, 0]
        y = np.array([1 if j == i else -1 for j in y])
        clf.fit(trainingData[:, 1:], y)
        nSV.append(sum(clf.n_support_))
        # get Ein
        Ein.append(1 - clf.score(trainingData[:, 1:], y))

        # get Eout
        y = testData[:, 0]
        y = np.array([1 if j == i else -1 for j in y])
        Eout.append(1 - clf.score(testData[:, 1:], y))

    idx_maxEin = np.argmax(Ein)
    idx_minEin = np.argmin(Ein)
    print "%d vs. all has the highest Ein" % idx_maxEin
    print "%d vs. all has the lowest Ein" % idx_minEin
    print "Classifier (%d vs all) had %d more support vectors than Classifier (%d vs all)" % (
        idx_maxEin,
        nSV[idx_maxEin] - nSV[idx_minEin],
        idx_minEin,
    )
Developer ID: allenyin, Project: LFD, Lines of code: 37, Source file: pset8.py

Example 12: run

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def run(dataset):

    train_X, train_y = dataset['train']
    dev_X, dev_y = dataset['dev']
    test_X, test_y = dataset['test']

    # param tuning

    param_grid = [
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['linear'],
            'gamma': ['auto']
            },
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['rbf'],
            'gamma': ['auto', 0.001, 0.01, 0.1, 1]
            }
        ]

    best_params = {}
    best_accuracy = 0

    clf = SVC(verbose=False)
    for d in param_grid:
        keys = list(d.keys())
        for v1 in d[keys[0]]:
            for v2 in d[keys[1]]:
                for v3 in d[keys[2]]:
                    params = {keys[0]: v1, keys[1]: v2, keys[2]: v3}
                    print('Params:', params)
                    clf.set_params(**params)
                    clf.fit(train_X, train_y)
                    acc_test = clf.score(dev_X, dev_y)
                    acc_train = clf.score(train_X, train_y)
                    print('Train Acc:', acc_train)
                    print('Dev Acc:', acc_test)
                    if acc_test > best_accuracy:
                        best_accuracy = acc_test
                        best_params = params
    clf.set_params(**best_params)
    clf.fit(train_X, train_y)
    print(best_params)
    print('Predicting...')
    predict_y = clf.predict(train_X)
    Acc, MCC = score(train_y, predict_y)
    print('Training Data Eval:')
    print('Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2)))

    predict_y = clf.predict(dev_X)
    Acc, MCC = score(dev_y, predict_y)
    print('Development Data Eval:')
    print('Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2)))

    predict_y = clf.predict(test_X)
    Acc, MCC = score(test_y, predict_y)
    print('Test Data Eval:')
    print('Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2)))
Developer ID: Shuailong, Project: StockPrediction, Lines of code: 61, Source file: svm_train.py
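Example 12 sweeps its parameter grid by hand; scikit-learn's GridSearchCV performs the same kind of sweep in a few lines. The snippet below is only an illustrative sketch assuming the train_X and train_y arrays from the example above; it is not code from the original project.

# Hedged sketch: the same parameter grid explored with GridSearchCV.
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

param_grid = [
    {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'kernel': ['linear']},
    {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'kernel': ['rbf'],
     'gamma': ['auto', 0.001, 0.01, 0.1, 1]},
]
search = GridSearchCV(SVC(), param_grid, cv=5)
search.fit(train_X, train_y)
print(search.best_params_, search.best_score_)  # best_score_ is mean CV accuracy

Note that unlike the example, GridSearchCV scores candidates by k-fold cross-validation on the training data rather than on a fixed development split.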

Example 13: testModelWithHyperParameter

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def testModelWithHyperParameter(train_xValues, train_yValues, test_xValues, test_yValues, cValue, kernel_name):
	clf = SVC(C=cValue,kernel=kernel_name)
	clf.fit(train_xValues, train_yValues)
	trainAcc = clf.score(train_xValues, train_yValues)
	testAcc = clf.score(test_xValues, test_yValues)
	prediction = clf.predict(test_xValues)
	#print("C: " + str(cValue), "Train Accuracy: " + str(round(trainAcc*100, 2)) + "%", "Test Accuracy: " + str(round(testAcc*100, 2)) + "%\n")
	#print("SVM Classifier, Accuracy - " + str(round(testAcc, 2)) + "%")
	return (prediction, testAcc)
Developer ID: wasiahmad, Project: Insulting-Comment-Detection, Lines of code: 11, Source file: svmClassifier.py

Example 14: main

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
def main():
    # read in data, parse into training and target sets
    data = csv_io.read_data("./filtered_classes.csv")
    target = np.array( [x[0] for x in data] )
    train = np.array( [x[1:] for x in data] )
    train_scaled = preprocessing.scale(train)

    X_train, X_test, y_train, y_test = cross_validation.train_test_split(train_scaled, target, test_size = 0.8)
    clf  = SVC(kernel='linear', C=0.005).fit(X_train, y_train)
    print(clf.score(X_test, y_test))
Developer ID: sid9211, Project: MillionSongAttribution, Lines of code: 12, Source file: svc.py

Example 15: run

# Required import: from sklearn.svm import SVC [as alias]
# Or: from sklearn.svm.SVC import score [as alias]
	def run(self) :
		"""
		The main loop for the TrainSVMs thread; trains an SVM for each point in the parameter space the
		user wishes to explore. Data is passed outside by putting it on the PlotDataQ. Periodic calls
		to self.verifygo() are used to make sure the user still wants to continue. In this version it is
		not possible to stop the thread until the current SVM finishes training, which could cause slow
		stopping response for large datasets.

		Note that this thread holds the lock on the feature vector database for its entire execution so
		the database is guaranteed not to change while it's running. The final actions of this thread are
		to release the database lock by closing the connection, deactivate the callback of the Stop button
		widget in the main GUI, and release the PauseLock for CPU heavy threads.
		"""
		assert not self.PauseLock.acquire(False), "The Stop button lock must be acquired before starting the TrainSVMs thread."
		try :
			jj = 0
			step = self.ParamSpace['Step']
			MaxTraining = self.ParamSpace['MaxTraining']
			listchoice = self.ParamSpace['WordList']
			Cs = self.ParamSpace['Costs']
	
			logging.debug('Connecting to database at %s'%self.DBpath)
			self.DBobj.ConnectDB(self.DBpath)
			logging.debug('Result: %s'%self.DBobj.DB_Connect)
			try :
				NumTraining = self.DBobj.GetTrainSampleCount()
				NumTraining = min(NumTraining,MaxTraining)
				Xcv,Ycv = self.DBobj.GetXY(listchoice,1)
				Xtrain = []
				Ytrain = []
				for m in range(0,NumTraining,step) :
					self.verifygo()
					Xs,Ys = self.DBobj.GetXY(listchoice,0,step,m)
					Xtrain.extend(Xs)
					Ytrain.extend(Ys)
					for cost in Cs :
						self.verifygo()
						clf = SVC(C=cost,kernel='linear')
						clf.fit(Xtrain,Ytrain)
						self.verifygo()
						TrainScore = clf.score(Xtrain,Ytrain)
						CVScore = clf.score(Xcv,Ycv)
						self.PlotDataQ.put((clf,m+step,cost,TrainScore,CVScore))
						logging.debug('%d, %d, %f, %f, %f'%(jj, m+step, cost, TrainScore, CVScore))
						jj += 1
			except Exception as detail :
				logging.error("Did not complete all the assigned SVM training: %s"%detail)
			finally :
				logging.debug('Disconnecting database at %s'%self.DBobj.DB_Connect)
				self.DBobj.DisconnectDB()
				logging.debug('Result: %s'%self.DBobj.DB_Connect)
		finally :
			self.PauseRef.configure(command=None)
			self.PauseLock.release()
Developer ID: joecole889, Project: spam-filter, Lines of code: 56, Source file: TrainSVMs.py


Note: The sklearn.svm.SVC.score examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. For redistribution and use, consult the corresponding project's license. Do not reproduce without permission.