当前位置: 首页>>代码示例>>Python>>正文


Python SVC.predict方法代码示例

本文整理汇总了Python中sklearn.svm.SVC.predict方法的典型用法代码示例。如果您正苦于以下问题:Python SVC.predict方法的具体用法?Python SVC.predict怎么用?Python SVC.predict使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.svm.SVC的用法示例。


在下文中一共展示了SVC.predict方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: solve_multiclass

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def solve_multiclass(trn, trn_ents, trn_lbl, tst, tst_ents,tst_lbl, relations,logfile, fractions, f, d=0, stopthresh=10):
    """Generate relational features, then score SVM / kNN / decision-tree
    classifiers over several feature-selection fractions.

    Returns one (19, 4) stat array per classifier (accuracy, precision,
    recall, F1 per fraction) plus the number of generated features.
    """
    generator = alg.FeatureGenerationAlt(trn, trn_ents, trn_lbl, relations)
    generator.generate_features(500, d)
    trn, trn_lbl, tst, feature_names = generator.get_new_table(tst, tst_ents)
    # Persist the transformed train/test tables for later analysis.
    with open('trn_and_tst_%d_%d.pkl'%(d, f), 'wb') as fptr:
        cPickle.dump((trn, trn_lbl, tst, tst_lbl, feature_names), fptr, -1)
        #TODO: feature trees also. can analyze them!

    #TODO: selection, run all 5...

    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.tree import DecisionTreeClassifier
    # Rows indexed by fraction; columns are accuracy, precision, recall, F1.
    svm_stats = zeros((19, 4))
    knn_stats = zeros((19, 4))
    tree_stats = zeros((19, 4))
    # (result table, fresh-classifier factory) pairs, evaluated in order.
    evaluations = (
        (svm_stats, lambda: SVC(kernel='linear', C=100)),
        (knn_stats, lambda: KNeighborsClassifier(n_neighbors=3)),
        (tree_stats, lambda: DecisionTreeClassifier(criterion='entropy', min_samples_split=8, random_state=0)),
    )
    for i, fraction in enumerate(fractions):
        new_trn, new_tst = feature_select_ig(trn, trn_lbl, tst, fraction)
        for stats, make_model in evaluations:
            model = make_model()
            model.fit(new_trn, trn_lbl)
            stats[i, :] = calc_stats(model.predict(new_tst), tst_lbl)

    return svm_stats, knn_stats, tree_stats, len(generator.new_features)
开发者ID:lioritan,项目名称:Thesis,代码行数:34,代码来源:ohsumedTitleOnly_compete.py

示例2: measure_accuracy

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def measure_accuracy(features_train, labels_train, features_test, labels_test):
    """Train rbf-kernel SVMs over several C values on a 1% training slice,
    plus one rbf SVM (C=10000) on the full training set, and print their
    test-set accuracies.  (Python 2; the original docstring incorrectly
    said Naive Bayes.)"""
    ### import the sklearn SVM classifier
    from sklearn.svm import SVC

    ### slice the training data down to 1% for faster experimentation
    features_train_small = features_train[: len(features_train) / 100]
    labels_train_small = labels_train[: len(labels_train) / 100]

    ### create classifier
    clf = SVC(kernel="linear")  # NOTE(review): shadowed by the loop variable below; appears unused
    l_model = []
    l_c = [10.0, 100.0, 1000.0, 10000.0]
    for f_c in l_c:
        l_model.append(SVC(kernel="rbf", C=f_c))

        ### fit the classifier on the training features and labels
    print "calculating for a small sample"
    t0 = time()
    l_clf = []
    for clf in l_model:
        l_clf.append(clf.fit(features_train_small, labels_train_small))

        # print "calculating for the total sample"
    # one more rbf SVC, this time trained on the *full* training set
    clf_all = SVC(kernel="rbf", C=10000.0)
    clf_all = clf_all.fit(features_train, labels_train)

    print "training time:", round(time() - t0, 3), "s"

    ### use the trained classifiers to predict labels for the test features
    t0 = time()
    l_pred = []
    for clf in l_clf:
        l_pred.append(clf.predict(features_test))

    pred_all = clf_all.predict(features_test)

    ### calculate and return the accuracy on the test data
    ### this is slightly different than the example,
    ### where we just print the accuracy
    ### you might need to import an sklearn module
    # accuracy = clf.score(features_test, labels_test)

    # another way
    from sklearn.metrics import accuracy_score

    for pred, c in zip(l_pred, l_c):
        acc = accuracy_score(pred, labels_test)
        print "Accuracy found for C={}: {:.4f}".format(c, acc)

    acc = accuracy_score(pred_all, labels_test)
    print "Accuracy found for C={} to all data: {:.4f}".format(10000, acc)

    # NOTE(review): all three labels say element 26 but the indices are
    # 10, 26 and 50 — likely a copy/paste slip in the message text.
    print "prediction to the element {}: {}".format(26, l_pred[-1][10])
    print "prediction to the element {}: {}".format(26, l_pred[-1][26])
    print "prediction to the element {}: {}".format(26, l_pred[-1][50])

    print "summing all results: {}".format(sum(pred_all))

    print "predicting time:", round(time() - t0, 3), "s"
开发者ID:aquibjaved,项目名称:intro_ML,代码行数:62,代码来源:svm_author_id.py

示例3: main

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def main():
    """Train and evaluate an RBF-kernel SVM on the MNIST data set.

    Loads MNIST, flattens each 28x28 image to a 784-dim vector scaled to
    [0, 1], fits an SVC, reports train/test error and accuracy, and
    pickles the fitted model to the file "svm_rbf".
    """
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()

    # preprocess data: flatten images, scale pixel values to [0, 1]
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    model = SVC(kernel='rbf', gamma=0.02, C=10)
    model.fit(X_train, Y_train)

    train_yy = model.predict(X_train)
    test_yy = model.predict(X_test)

    # NOTE(review): MSE on class labels treats digits as ordinal values;
    # kept for compatibility with the original reporting.
    train_err = 100*mean_squared_error(train_yy, Y_train)
    test_err = 100*mean_squared_error(test_yy, Y_test)

    print("Train. err:", train_err)
    print("Test err:", test_err)

    train_acc = accuracy_score(Y_train, train_yy)
    test_acc = accuracy_score(Y_test, test_yy)
    # Fix: the accuracies were computed but never used — report them.
    print("Train acc:", train_acc)
    print("Test acc:", test_acc)

    # Fix: use a context manager so the file handle is reliably closed
    # (the original passed an anonymous open() to pickle.dump).
    with open("svm_rbf", "wb") as fout:
        pickle.dump(model, fout)
开发者ID:PetraVidnerova,项目名称:scikit-mnist,代码行数:35,代码来源:eval.py

示例4: solve_multiclass

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def solve_multiclass(trn, trn_ents, trn_lbl, test, tst_ents,tst_lbl, relations,logfile, fractions, f, d=0, stopthresh=10):
    """Generate RDF-derived features and evaluate SVM and kNN classifiers
    across feature depths 1..19 and several selection fractions.

    Returns (svm_stats, knn_stats, feature_counts): the stat arrays are
    indexed [fraction, depth-1, metric] with metrics accuracy, precision,
    recall, F1; feature_counts[depth-1] is the feature count at that depth.
    """
    blor= alg.FeatureGenerationFromRDF(trn, trn_ents, trn_lbl, relations)
    blor.generate_features(400*(d**2), d, stopthresh, 20, logfile, stopthresh)

    #TODO: selection, run all 5...

    from sklearn.svm import SVC
    from sklearn.neighbors import KNeighborsClassifier
    blah1=zeros((19,19, 4)) #accuracy,precision,recall,F1
    blah2=zeros((19,19, 4))
    blue=zeros(19)
    for depth in range(1, 20):
        idx= depth-1
        # Fix: removed leftover debugging breakpoint
        # (import pdb; pdb.set_trace()) that halted every iteration.
        trn, trn_lbl, tst, feature_num= blor.get_new_table(test, tst_ents, depth)
        blue[idx]=feature_num

        for i,fraction in enumerate(fractions):
            new_trn, new_tst= feature_select_ig(trn, trn_lbl, tst, fraction)

            clf= SVC(kernel='linear', C=10)
            clf.fit(new_trn, trn_lbl)
            blah1[i,idx, :]= calc_stats(clf.predict(new_tst),tst_lbl)

            clf= KNeighborsClassifier(n_neighbors=3)
            clf.fit(new_trn, trn_lbl)
            blah2[i,idx, :]= calc_stats(clf.predict(new_tst),tst_lbl)

    return blah1, blah2, blue
开发者ID:lioritan,项目名称:Thesis,代码行数:32,代码来源:ohsumedTitleOnly_depth.py

示例5: launch_svm

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def launch_svm():
    """Train an RBF-kernel SVC on pre-saved arrays and log/print training
    accuracy plus test accuracy, precision and recall.  (Python 2.)"""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s',
        filename=LOG_FILE)
    logging.info(':::START OF EXPERIMENT:::')

    # Archives keyed by array name ('training_data', 'training_labels_raw',
    # 'test_data', 'test_labels_raw') — presumably .npz files; confirm.
    training_data = np.load(TRAINING_FILE)
    test_data = np.load(TEST_FILE)
    print test_data['test_data'].shape
    print str(training_data['training_labels_raw'][:20])
    # Alternative models tried previously, kept for reference:
    #svm = LinearSVC()
    #svm = SVC(kernel='linear')
    #svm = OneVsRestClassifier(LinearSVC(random_state=0))
    svm = SVC(kernel='rbf')
    logging.info('::::Model Train Begin:::')
    svm.fit(training_data['training_data'], training_data['training_labels_raw'])
    #print svm.n_support_
    logging.info('::::Model Train Complete:::')
    #with open(SVM_FILE, 'wb') as file_dump:
    #    cPickle.dump(svm, file_dump)
    # Resubstitution accuracy on the training set.
    train_acc = accuracy_score(training_data['training_labels_raw'], svm.predict(training_data['training_data']))
    logging.info('training acc:' + str(train_acc))
    print train_acc
    #scores = cross_validation.cross_val_score(svm, training_data['training_data'][:5000], training_data['training_labels_raw'][:5000], cv=5)
    #print("CV Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
    predicted_label = svm.predict(test_data['test_data'])
    #predicted_label = svm.predict(training_data['training_data'][100:200])
    print predicted_label[:20]
    # Per-class precision/recall (average=None returns one value per class).
    acc= accuracy_score(test_data['test_labels_raw'], predicted_label)
    prec = precision_score(test_data['test_labels_raw'], predicted_label, average=None)
    rec = recall_score(test_data['test_labels_raw'], predicted_label, average=None)
    logging.info('acc:'+str(acc)+'\nprec:'+str(prec)+'\nrec:'+str(rec))
    logging.info(':::END OF EXPERIMENT:::')
开发者ID:imoonkey,项目名称:thin_nn,代码行数:36,代码来源:svm.py

示例6: NormalSVCTrainer

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
class NormalSVCTrainer(AbstractLearner):
    """Support-vector classifier wrapper with optional standardisation.

    When ``scale`` is true, a StandardScaler is fitted on the training
    data and applied to every input before fitting or predicting.
    """

    def __init__(self, kernel='linear', gamma='auto', penalty=1.0, cache=200, scale=True, scheme='ovr', class_w='balanced'):
        # Keep the hyper-parameters around for __str__.
        self.kernel = kernel
        self.gamma = gamma
        self.penalty = penalty
        self.scheme = scheme
        self.scale = scale
        self.learner = SVC(C=penalty, kernel=kernel, gamma=gamma, probability=True, cache_size=cache, decision_function_shape=scheme,
                           class_weight=class_w)

    def _train(self, x_train, y_train):
        features = x_train
        if self.scale:
            # Fit the scaler on the training data only.
            self.scaler = preprocessing.StandardScaler().fit(x_train)
            features = self.scaler.transform(x_train)
        self.learner = self.learner.fit(features, y_train)

    def _predict(self, x):
        if not self.scale:
            return self.learner.predict(x)
        return self.learner.predict(self.scaler.transform(x))

    def _predict_proba(self, x):
        if not self.scale:
            return self.learner.predict_proba(x)
        return self.learner.predict_proba(self.scaler.transform(x))

    def __str__(self):
        return 'SVC (kernel=%s, penalty: %f, scheme: %s, gamma=%s)' % \
               (self.kernel, self.penalty, self.scheme, str(self.gamma))
开发者ID:Zepheus,项目名称:ml-traffic,代码行数:37,代码来源:normal_svc.py

示例7: main

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def main():
    """Detect faces in the image given by --image, crop and resize each to
    100x100, PCA-reduce the flattened crops, and classify them with a
    probabilistic SVC trained on hard-coded labels.  (Python 2.)"""
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required = True, help = "Path to the image")
    args = vars(ap.parse_args())

    image = cv2.imread(args["image"])
    rects, img = detect(image)

    cropped = []

    for idx, (x1, y1, x2, y2) in enumerate(rects):
        # Crop the detected rectangle and normalise it to 100x100 pixels.
        crop_img = image[y1:y1 + (y2 - y1), x1:x1 + (x2 - x1)]
        crop_img = cv2.resize(crop_img, (100,100), interpolation = cv2.INTER_AREA)
        cv2.imshow("image" + str(idx), crop_img)
        # Flatten the 100x100x3 crop into a single feature vector.
        new_img = crop_img.reshape(crop_img.shape[0] * crop_img.shape[1], 3)
        cropped.append(new_img.flatten())

    # reduce feature size
    cropped_pca = []
    pca = RandomizedPCA(n_components=100)
    cropped_pca = pca.fit_transform(cropped)

    # training (hardcoded for now)
    clf   = SVC(probability=True)
    train = cropped_pca[:7]
    test  = cropped_pca[7:13]
    # clf.fit([[0,0],[1,1]], [1, 2])
    # NOTE(review): assumes at least 7 faces were detected and hard-codes
    # their class labels — TODO confirm against the input images used.
    clf.fit(train, [1,2,2,1,2,1,1])

    for item in test:
        print clf.predict_proba(item)
        print clf.predict(item)

    cv2.waitKey(0)
开发者ID:shulhi,项目名称:opencv-playground,代码行数:36,代码来源:crop_faces_ml.py

示例8: classify_svm

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def classify_svm(text):
    """Classify a question into coarse and fine categories with RBF SVMs.

    Trains one SVC per granularity on TF-IDF features of the module-level
    training sets and returns {'coarse_class': ..., 'fine_class': ...}
    with the predictions for *text*.
    """
    coarse_X = sets['coarse_training_qs']
    coarse_Y = sets['coarse_training_targets']
    fine_X = sets['fine_training_qs']
    fine_Y = sets['fine_training_targets']

    # Fix: the original reused a single vectorizer and re-fitted it on the
    # fine-grained questions, leaving the coarse training matrix in a
    # different feature space from the vector being classified.  Use one
    # vectorizer per granularity so each SVM sees a consistent vocabulary.
    coarse_vectz = TfidfVectorizer(min_df=2, decode_error="ignore")
    coarse_X = coarse_vectz.fit_transform(coarse_X)
    fine_vectz = TfidfVectorizer(min_df=2, decode_error="ignore")
    fine_X = fine_vectz.fit_transform(fine_X)

    # coarse classifier
    svm_coarse = SVC(C=1000, gamma = 0.001, kernel='rbf')
    svm_coarse.fit(coarse_X, coarse_Y)
    # predict with the coarse vocabulary
    coarse_predict = svm_coarse.predict(coarse_vectz.transform([text]).toarray())

    # fine classifier
    svm_fine = SVC(C=1000, gamma = 0.001, kernel='rbf')
    svm_fine.fit(fine_X, fine_Y)
    # predict with the fine vocabulary
    fine_predict = svm_fine.predict(fine_vectz.transform([text]).toarray())

    results = {}
    results['coarse_class'] = coarse_predict[0]
    results['fine_class'] = fine_predict[0]

    return results
开发者ID:el9335,项目名称:QUAILS_1.0,代码行数:32,代码来源:serv.py

示例9: main

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def main():
    """Fit a default SVC on 2-D labelled data, print a classification
    report, and plot the decision boundary over a mesh grid together with
    the training points.  (Python 2.)"""
    X, X_labels = multivariate_normal.load_data_with_label()
    X_train, X_test, y_train, y_test = train_test_split(X, X_labels)

    clf = SVC()
    clf.fit(X_train, y_train)
    pred = clf.predict(X_test)
    # classification_report wants string class names.
    X_labels_uniq = map(np.str, np.unique(X_labels))
    print classification_report(y_test, pred,
                                target_names=X_labels_uniq)

    # plot decision boundary with meshgrid
    h = 0.1  # mesh step size
    x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
    y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Classify every mesh point to colour the decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)

    # plot also the training points
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.Paired)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.show()
开发者ID:JackBass,项目名称:ml-algorithms-simple,代码行数:28,代码来源:svm_sample.py

示例10: run

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def run(dataset):
    """Manually grid-search an SVC over C/kernel/gamma, pick the parameters
    with the best dev-set accuracy, refit, and print Acc/MCC on the train,
    dev and test splits.  (Python 2.)

    dataset: mapping with 'train', 'dev', 'test' -> (X, y) pairs.
    """
    train_X, train_y = dataset['train']
    dev_X, dev_y = dataset['dev']
    test_X, test_y = dataset['test']

    # param tuning

    param_grid = [
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['linear'],
            'gamma': ['auto']
            },
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['rbf'],
            'gamma': ['auto', 0.001, 0.01, 0.1, 1]
            }
        ]

    best_params = {}
    best_accuracy = 0

    clf = SVC(verbose=False)
    for d in param_grid:
        # NOTE(review): assumes each grid dict has exactly three keys and
        # relies on Python 2's indexable d.keys() — this breaks on Python 3.
        keys = d.keys()
        for v1 in d[keys[0]]:
            for v2 in d[keys[1]]:
                for v3 in d[keys[2]]:
                    params = {keys[0]: v1, keys[1]: v2, keys[2]: v3}
                    print 'Params:', params
                    clf.set_params(**params)
                    clf.fit(train_X, train_y)
                    acc_test = clf.score(dev_X, dev_y)
                    acc_train = clf.score(train_X, train_y)
                    print 'Train Acc:', acc_train
                    print 'Dev Acc:', acc_test
                    if acc_test > best_accuracy:
                        best_accuracy = acc_test
                        best_params = params
    # Refit with the best parameters found on the dev set.
    clf.set_params(**best_params)
    clf.fit(train_X, train_y)
    print best_params
    print 'Predicting...'
    predict_y = clf.predict(train_X)
    Acc, MCC = score(train_y, predict_y)
    print 'Training Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))

    predict_y = clf.predict(dev_X)
    Acc, MCC = score(dev_y, predict_y)
    print 'Development Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))

    predict_y = clf.predict(test_X)
    Acc, MCC = score(test_y, predict_y)
    print 'Test Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))
开发者ID:Shuailong,项目名称:StockPrediction,代码行数:61,代码来源:svm_train.py

示例11: svmclassifier

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def svmclassifier(train,test,train_target,test_target):
    """Fit a default SVC, print classification reports for the training
    and test splits, and return the fitted classifier.  (Python 2.)"""
    clf = SVC()
    clf.fit(train,train_target)
    res = clf.predict(train)
    print classification_report(train_target,res)
    
    res1 = clf.predict(test)
    print classification_report(test_target,res1)
    return clf
开发者ID:harjeet88,项目名称:kaggle-data-scince-london,代码行数:11,代码来源:predicyion.py

示例12: svm

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def svm(X_vectors, t):
    """Evaluate an SVC on vectors X_vectors with binary labels t.

    Returns (accuracy, pre, rec, F): accuracy from leave-one-out CV,
    precision/recall averaged over 100 random 80/20 splits, and the
    harmonic-mean F score of the averaged precision and recall.
    """
    # --- leave-one-out accuracy ---
    n = len(t)
    true_num = 0
    for i in range(n):
        X_train = list(X_vectors)
        del X_train[i]
        t_train = list(t)
        del t_train[i]
        X_test = X_vectors[i]
        t_test = t[i]

        clf = SVC()
        clf.fit(X_train, t_train)
        y = clf.predict(X_test)
        if y == t_test:
            true_num += 1
    accuracy = 1.0 * true_num / n

    # --- precision/recall over 100 random 80/20 splits ---
    X = np.array(X_vectors)
    tt = list(t)
    pre = []
    rec = []
    for _ in range(100):
        X_train, X_test, t_train, t_test = train_test_split(X, tt, test_size=0.2)
        clf = SVC()
        clf.fit(X_train, t_train)
        y_test = clf.predict(X_test)
        t_pos = 0
        f_pos = 0
        t_neg = 0
        f_neg = 0
        for i in range(len(y_test)):
            if t_test[i] == 1 and y_test[i] == 1:
                t_pos += 1
            elif t_test[i] == 0 and y_test[i] == 1:
                f_pos += 1
            elif t_test[i] == 0 and y_test[i] == 0:
                t_neg += 1
            elif t_test[i] == 1 and y_test[i] == 0:
                f_neg += 1

        # Fix: precision/recall were computed and appended *inside* the
        # per-sample counting loop, recording one partial value per test
        # sample.  Compute them once per split, after counting finishes.
        if t_pos == 0:
            precision = 0
            recall = 0
        else:
            precision = 1.0 * t_pos / (t_pos + f_pos)
            recall = 1.0 * t_pos / (t_pos + f_neg)
        pre.append(precision)
        rec.append(recall)

    pre = sum(pre) / len(pre)
    rec = sum(rec) / len(rec)
    F = 2 / (1/pre + 1/rec)

    return accuracy, pre, rec, F
开发者ID:Yuliang-Zou,项目名称:FreeFoodCalendar,代码行数:59,代码来源:test.py

示例13: main

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def main():
    """Build a word-deletion detection dataset and train an SVC on it.

    Reads sentences from a corpus file, deletes one random token per
    sentence to create positive feature windows, samples non-deletion
    positions as negatives, trains an SVC on a 70/30 split, prints
    classification reports, and pickles the classifier.
    """
    synCor = SynCorrection(5)
    synCorLex = SynCorrection(4, lex = True)
    synRep = SynReplacer(lex = True)
    posWindows = []
    negWindows = []
    with open("C:/MissingWord/train/corpusPart2.txt", "r") as f:
        for index, line in enumerate(f):
            line = line.strip()
            if len(line) > 1:
                tokens = line.split(" ")
                if len(tokens) > 3:
                    # Delete a random interior token; that position is the positive window.
                    removed = random.randint(1, len(tokens) - 2)
                    cutTokens = tokens.copy()
                    del cutTokens[removed]
                    posWindows.append(makeFeatures(synCor, synCorLex, synRep, cutTokens, removed))
                    for i in range(3):
                        negWindowIndex = random.randint(1, len(tokens) - 2)
                        # Idiom fix: abs(a - b) > 0 is just a != b.
                        if negWindowIndex != removed:
                            negWindows.append(makeFeatures(synCor, synCorLex, synRep, cutTokens, negWindowIndex))
            if index > 10000:
                break

    data = []

    for window in posWindows:
        data.append((window, 1))

    for window in negWindows:
        data.append((window, 0))

    random.shuffle(data)

    # 70/30 train/test split.
    cutoff = int(len(data) * 7 / 10)

    trainFeatures = [datum[0] for datum in data[:cutoff]]
    trainLabels = [datum[1] for datum in data[:cutoff]]

    testFeatures = [datum[0] for datum in data[cutoff:]]
    testLabels = [datum[1] for datum in data[cutoff:]]

    # Peek at the first few feature vectors for sanity checking.
    for i in range(10):
        print(trainFeatures[i])

    trainFeatures = np.array(trainFeatures)
    testFeatures = np.array(testFeatures)

    clf = SVC(C=1)
    #clf = RandomForestClassifier(n_estimators = 1000)
    clf.fit(trainFeatures, trainLabels)
    trainingPred = clf.predict(trainFeatures)
    print(classification_report(trainLabels, trainingPred))
    print(classification_report(testLabels, clf.predict(testFeatures)))

    with open("synCorrTight.clf", "wb") as f:
        pickle.dump(clf, f)
开发者ID:seokhohong,项目名称:missing-word,代码行数:58,代码来源:syntacticCorrection.py

示例14: run_3

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def run_3():
    """Stacked (two-stage) classification over per-ROI voxel data.

    For each outer StratifiedKFold split over subjects:
      1. per-ROI SVCs generate out-of-sample predictions through an inner
         4-fold CV, saved to outputs/roi_predictions_fold<k>.txt,
      2. a combined rbf SVC is fitted on those out-of-sample predictions,
      3. per-ROI SVCs are refitted on the full outer training fold,
      4. the ROI classifiers and the combined classifier are applied to
         the outer test fold and the overall accuracy is printed.

    Uses module-level subject_labels, subject_ids, roi_names, rois and
    the get_xy helper.
    """
    fold = 1
    for train, test in StratifiedKFold(subject_labels, n_folds=5):
        # Create empty table for holding predictions
        predictions = dict()
        predictions['diagnosis'] = subject_labels[train]
        # For each ROI, get voxels corresponding to training subjects
        for roi_name in roi_names:
            predictions[roi_name] = []
            X, y = get_xy(
                rois[roi_name].loc[subject_ids[train]],
                label_column='diagnosis1', exclude_columns=['diagnosis1', 'diagnosis2'])
            # Get out-of-sample predictions for each fold in the CV
            scores = []
            for train1, test1 in StratifiedKFold(subject_labels[train], n_folds=4):
                classifier = SVC()
                classifier.fit(X[train1], y[train1])
                y_pred = classifier.predict(X[test1])
                predictions[roi_name].extend(y_pred)
                scores.append(accuracy_score(y[test1], y_pred))
            print('mean score: {}'.format(np.mean(scores)))
            print('complete score: {}'.format(accuracy_score(y, predictions[roi_name])))
        # Create data frame from the predictions and save it to file
        predictions = pd.DataFrame(predictions, index=subject_ids[train])
        predictions.to_csv('outputs/roi_predictions_fold{}.txt'.format(fold))
        fold += 1
        # Now we have, for each ROI, out-of-sample predictions for all training points
        # in the initial training set. Next, fit the second model to the out-of-sample
        # predictions.
        X, y = get_xy(predictions, label_column='diagnosis', exclude_columns=['diagnosis'])
        classifier_combi = SVC(kernel='rbf')
        classifier_combi.fit(X, y)
        # Now we train a classifier on all training points of each ROI
        classifiers = {}
        for roi_name in roi_names:
            X, y = get_xy(
                rois[roi_name].loc[subject_ids[train]],
                label_column='diagnosis1', exclude_columns=['diagnosis1', 'diagnosis2'])
            classifier = SVC()
            classifier.fit(X, y)
            classifiers[roi_name] = classifier
        # Next, we apply the ROI classifiers and combined classifier to the test data
        predictions = dict()
        predictions['diagnosis'] = subject_labels[test]
        for roi_name in roi_names:
            predictions[roi_name] = []
            X, y = get_xy(
                rois[roi_name].loc[subject_ids[test]],
                label_column='diagnosis1', exclude_columns=['diagnosis1', 'diagnosis2'])
            y_pred = classifiers[roi_name].predict(X)
            predictions[roi_name].extend(y_pred)
        predictions = pd.DataFrame(predictions)
        X, y = get_xy(predictions, label_column='diagnosis', exclude_columns=['diagnosis'])
        y_pred = classifier_combi.predict(X)
        print('overall score: {}'.format(accuracy_score(y, y_pred)))
开发者ID:rbrecheisen,项目名称:scripts,代码行数:58,代码来源:two_stage1.py

示例15: SVM

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict [as 别名]
def SVM(X, Y, XTest, YTest):
    """Train an SVC and print timing, accuracy, precision and recall for
    the train and test sets, then plot an ROC curve.  (Python 2.)

    Despite the grid-search scaffolding, the classifier actually used is
    a fixed SVC(C=500, gamma=0.1) — the GridSearchCV call is commented
    out.  Precision/recall use average='binary', so binary labels are
    assumed.
    """
    print '-----------------------------------------------------'
    # grid search over these to find parameters
    CList = [.001, .003, .01, .03, .1, .3, 1, 3, 6, 10, 15, 30, 40]
    gammaList = [.001, .003, .01, .03, .1, .3, 1, 2, 3, 4, 5, 6, 7]
    param_grid = [{'C': CList,
                   'gamma': gammaList,
                   'kernel': ['rbf', 'sigmoid', 'linear']}]
    # grid search over these to find parameters
    # rbf_grid = GridSearchCV(SVC(probability=True), param_grid=param_grid)
    rbf_grid = SVC(C=500, gamma=0.1, probability=True)
    # fit the models
    rbf_grid.fit(X, Y)

    # print("The best parameters are %s with a score of %0.2f"
    #       % (rbf_grid.best_params_, rbf_grid.best_score_))

    print "Computing training statistics"
    rbf_predict_time_training = time.time()
    Ypred_rbf_training = rbf_grid.predict(X)
    rbf_predict_time_training = time.time() - rbf_predict_time_training

    rbf_accuracy_training = metrics.accuracy_score(Y, Ypred_rbf_training)
    rbf_precision_training = metrics.precision_score(Y, Ypred_rbf_training,
                                                     average='binary')
    rbf_recall_training = metrics.recall_score(Y, Ypred_rbf_training,
                                               average='binary')

    print "SVM RBF training prediction time: " + str(rbf_predict_time_training)
    print "SVM RBF training accuracy Score: " + str(rbf_accuracy_training)
    print "SVM RBF training precision Score: " + str(rbf_precision_training)
    print "SVM RBF training recall Score: " + str(rbf_recall_training)

    print "Computing testing statistics"
    rbf_predict_time_test = time.time()
    Ypred_rbf_test = rbf_grid.predict(XTest)
    rbf_predict_time_test = time.time() - rbf_predict_time_test

    rbf_accuracy_test = metrics.accuracy_score(YTest, Ypred_rbf_test)
    rbf_precision_test = metrics.precision_score(YTest, Ypred_rbf_test,
                                                 average='binary')
    rbf_recall_test = metrics.recall_score(YTest, Ypred_rbf_test,
                                           average='binary')

    print "SVM RBF test prediction time: " + str(rbf_predict_time_test)
    print "SVM RBF test accuracy Score: " + str(rbf_accuracy_test)
    print "SVM RBF test precision Score: " + str(rbf_precision_test)
    print "SVM RBF test recall Score: " + str(rbf_recall_test)

    print "Creating ROC curve"
    y_true = YTest
    y_score = rbf_grid.predict_proba(XTest)
    # NOTE(review): 'trpSVM' is presumably the true-positive rate (TPR);
    # name kept as-is.  The curve scores the probability of class 0 with
    # pos_label=0.
    fprSVM, trpSVM, _ = metrics.roc_curve(y_true=y_true,
                                          y_score=y_score[:, 0],
                                          pos_label=0)
    plt.plot(fprSVM, trpSVM, 'b-', label='SVM')
开发者ID:jhurwitzupenn,项目名称:CIS419Project,代码行数:58,代码来源:trainClassifiers.py


注:本文中的sklearn.svm.SVC.predict方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。