

Python svm.SVC Code Examples

This article collects typical usage examples of the sklearn.svm.SVC class in Python. If you are unsure what svm.SVC does, how to use it, or want to see concrete usage, the curated code examples below may help. You can also explore other usage examples from the sklearn.svm module.


A total of 15 code examples of svm.SVC are shown below, sorted by popularity by default.
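
Before the project examples, here is a minimal, self-contained sketch of the typical SVC workflow (written for this article on toy data, not taken from any of the projects below): construct the classifier, fit it, then predict labels, probabilities, and accuracy.

import numpy as np
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split

# Toy data: two Gaussian blobs.
rng = np.random.RandomState(0)
X = np.vstack([rng.randn(50, 2) + 2, rng.randn(50, 2) - 2])
y = np.array([0] * 50 + [1] * 50)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = SVC(C=1.0, kernel='rbf', gamma='scale', probability=True)
clf.fit(X_train, y_train)
print(clf.predict(X_test[:5]))        # hard class labels
print(clf.predict_proba(X_test[:5]))  # class probabilities (requires probability=True)
print(clf.score(X_test, y_test))      # mean accuracy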

Example 1: _build_model

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
# Also needed by this snippet (not shown in the excerpt): XGBClassifier from xgboost,
# chi2_kernel from sklearn.metrics.pairwise, and LR, presumably
# sklearn.linear_model.LogisticRegression imported under that alias.
def _build_model(self, model_name, params=None):
        if params is None:
            if model_name == 'xgb':
                self.model = XGBClassifier(n_estimators=100, learning_rate=0.02)
            elif model_name == 'svm':
                kernel_function = chi2_kernel if not (self.model_kernel == 'linear' or self.model_kernel == 'rbf') else self.model_kernel
                self.model = SVC(C=1, kernel=kernel_function, gamma=1, probability=True)
            elif model_name == 'lr':
                self.model = LR(C=1, penalty='l1', tol=1e-6)
        else:
            if model_name == 'xgb':
                self.model = XGBClassifier(n_estimators=1000, learning_rate=0.02, **params)
            elif model_name == 'svm':
                # select the kernel the same way as in the default branch
                kernel_function = chi2_kernel if not (self.model_kernel == 'linear' or self.model_kernel == 'rbf') else self.model_kernel
                self.model = SVC(C=1, kernel=kernel_function, gamma=1, probability=True)
            elif model_name == 'lr':
                self.model = LR(C=1, penalty='l1', tol=1e-6)

        log.l.info('=======> built the model {} done'.format(self.model_name))
Author: qijiezhao, Project: Video-Highlight-Detection, Lines of code: 20, Source: classifier.py
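
Example 1 passes sklearn's chi2_kernel as a callable kernel when the configured kernel is neither 'linear' nor 'rbf'. As a standalone illustration added for this article (toy data is hypothetical), SVC accepts any callable that maps two sample matrices to a kernel matrix:

import numpy as np
from sklearn.svm import SVC
from sklearn.metrics.pairwise import chi2_kernel

rng = np.random.RandomState(0)
X = rng.rand(60, 5)              # chi2_kernel expects non-negative features
y = (X[:, 0] > 0.5).astype(int)

# callable kernel: k(X, Y) must return an (n_samples_X, n_samples_Y) matrix
clf = SVC(C=1, kernel=chi2_kernel, probability=True)
clf.fit(X, y)
print(clf.predict_proba(X[:3]))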

Example 2: test_svm

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def test_svm(self):
        svc_clf = SVC(gamma="auto")
        svc_clf.fit(self.X_train, self.y_train)
        svm = SVM()
        svm.train(Dataset(self.X_train, self.y_train))

        assert_array_equal(
            svc_clf.predict(self.X_train), svm.predict(self.X_train))
        assert_array_equal(
            svc_clf.predict(self.X_test), svm.predict(self.X_test))
        self.assertEqual(
            svc_clf.score(self.X_train, self.y_train),
            svm.score(Dataset(self.X_train, self.y_train)))
        self.assertEqual(
            svc_clf.score(self.X_test, self.y_test),
            svm.score(Dataset(self.X_test, self.y_test))) 
Author: ntucllab, Project: libact, Lines of code: 18, Source: test_svm.py

Example 3: create_pandas_only_svm_classifier

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def create_pandas_only_svm_classifier(X, y, probability=True):
    class PandasOnlyEstimator(TransformerMixin):
        def fit(self, X, y=None, **fitparams):
            return self

        def transform(self, X, **transformparams):
            dataset_is_df = isinstance(X, pd.DataFrame)
            if not dataset_is_df:
                raise Exception("Dataset must be a pandas dataframe!")
            return X

    pandas_only = PandasOnlyEstimator()

    clf = svm.SVC(gamma=0.001, C=100.0, probability=probability, random_state=777)
    pipeline = Pipeline([("pandas_only", pandas_only), ("clf", clf)])
    return pipeline.fit(X, y) 
Author: interpretml, Project: interpret-text, Lines of code: 18, Source: common_utils.py

Example 4: multi_class_classification

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def multi_class_classification(data_X, data_Y):
    '''
    Run multi-class classification and print the related evaluation metrics.
    '''
    svc = svm.SVC(C=1, kernel='linear')
    # X_train, X_test, y_train, y_test = train_test_split(data_X, data_Y, test_size=0.4, random_state=0)
    clf = svc.fit(data_X, data_Y)  # svm
    # array = svc.coef_
    # print(array)
    predicted = cross_val_predict(clf, data_X, data_Y, cv=2)
    print("accuracy", metrics.accuracy_score(data_Y, predicted))
    print("f1 score macro", metrics.f1_score(data_Y, predicted, average='macro'))
    print("f1 score micro", metrics.f1_score(data_Y, predicted, average='micro'))
    print("precision score", metrics.precision_score(data_Y, predicted, average='macro'))
    print("recall score", metrics.recall_score(data_Y, predicted, average='macro'))
    print("hamming_loss", metrics.hamming_loss(data_Y, predicted))
    print("classification_report", metrics.classification_report(data_Y, predicted))
    # jaccard_similarity_score was renamed to jaccard_score in newer scikit-learn releases
    print("jaccard_similarity_score", metrics.jaccard_similarity_score(data_Y, predicted))
    # print("log_loss", metrics.log_loss(data_Y, predicted))
    print("zero_one_loss", metrics.zero_one_loss(data_Y, predicted))
    # print("AUC&ROC", metrics.roc_auc_score(data_Y, predicted))
    # print("matthews_corrcoef", metrics.matthews_corrcoef(data_Y, predicted))
Author: RoyZhengGao, Project: edge2vec, Lines of code: 25, Source: multi_class_classification.py

Example 5: __init__

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def __init__(self, 
        learner=SVC(C=1000), 
        multiclass_strategy='ova', 
        verbose=False,
        max_iter=1000, 
        learning_rate=0.01, 
        tolerance=1e-7, 
        callbacks=[], 
        scheduler=None ):

        super().__init__(
            learner=learner, 
            multiclass_strategy=multiclass_strategy, 
            max_iter=max_iter, 
            verbose=verbose, 
            tolerance=tolerance,
            callbacks=callbacks,
            scheduler=scheduler, 
            direction='min', 
            learning_rate=learning_rate, 
        )
        self.func_form = summation 
Author: IvanoLauriola, Project: MKLpy, Lines of code: 24, Source: GRAM.py

Example 6: compute_accuracy_svc

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def compute_accuracy_svc(
    data_train,
    labels_train,
    data_test,
    labels_test,
    param_grid=None,
    verbose=0,
    max_iter=-1,
):
    if param_grid is None:
        param_grid = [
            {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
            {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
        ]
    svc = SVC(max_iter=max_iter)
    clf = GridSearchCV(svc, param_grid, verbose=verbose, cv=3)
    return compute_accuracy_classifier(
        clf, data_train, labels_train, data_test, labels_test
    ) 
Author: YosefLab, Project: scVI, Lines of code: 21, Source: annotation.py
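
Example 6 tunes SVC with GridSearchCV over linear and RBF configurations, then delegates scoring to a project-specific helper (compute_accuracy_classifier, not reproduced here). A self-contained sketch of the same grid search on hypothetical toy data:

from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
param_grid = [
    {"C": [1, 10, 100, 1000], "kernel": ["linear"]},
    {"C": [1, 10, 100, 1000], "gamma": [0.001, 0.0001], "kernel": ["rbf"]},
]
clf = GridSearchCV(SVC(max_iter=-1), param_grid, verbose=0, cv=3)
clf.fit(X, y)
print(clf.best_params_, clf.best_score_)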

Example 7: __init__

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def __init__(self, *args, **kwargs):
        super(MaximumLossReductionMaximalConfidence, self).__init__(*args, **kwargs)

        # self.n_labels = len(self.dataset.get_labeled_entries()[0][1])
        self.n_labels = len(self.dataset.get_labeled_entries()[1][0])

        random_state = kwargs.pop('random_state', None)
        self.random_state_ = seed_random_state(random_state)

        self.logreg_param = kwargs.pop('logreg_param',
                                       {'multi_class': 'multinomial',
                                        'solver': 'newton-cg',
                                        'random_state': random_state})
        self.logistic_regression_ = LogisticRegression(**self.logreg_param)

        self.br_base = kwargs.pop('br_base',
              SklearnProbaAdapter(SVC(kernel='linear',
                                      probability=True,
                                      gamma="auto",
                                      random_state=random_state))) 
Author: ntucllab, Project: libact, Lines of code: 22, Source: maximum_margin_reduction.py

Example 8: objective_function

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def objective_function(x, s):
    # Start the clock to determine the cost of this function evaluation
    start_time = time.time()

    # Shuffle the data and split off the requested subset of the training data
    s_max = y_train.shape[0]
    shuffle = np.random.permutation(np.arange(s_max))
    train_subset = X_train[shuffle[:s]]
    train_targets_subset = y_train[shuffle[:s]]

    # Train the SVM on the subset
    C = np.exp(float(x[0]))
    gamma = np.exp(float(x[1]))
    clf = svm.SVC(gamma=gamma, C=C)
    clf.fit(train_subset, train_targets_subset)
    
    # Validate this hyperparameter configuration on the full validation data
    y = 1 - clf.score(X_val, y_val)

    c = time.time() - start_time

    return y, c

# Load the data 
Author: automl, Project: RoBO, Lines of code: 26, Source: example_fabolas.py

Example 9: buildModel

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def buildModel(dataset, method, parameters):
    """
    Build final model for predicting real testing data
    """
    features = dataset.columns[0:-1]

    if method == 'RNN':
        clf = performRNNlass(dataset[features], dataset['UpDown'])
        return clf

    elif method == 'RF':
        clf = RandomForestClassifier(n_estimators=1000, n_jobs=-1)

    elif method == 'KNN':
        clf = neighbors.KNeighborsClassifier()

    elif method == 'SVM':
        c = parameters[0]
        g =  parameters[1]
        clf = SVC(C=c, gamma=g)

    elif method == 'ADA':
        clf = AdaBoostClassifier()

    return clf.fit(dataset[features], dataset['UpDown']) 
Author: chinuy, Project: stock-price-prediction, Lines of code: 27, Source: classifier.py
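
To make the 'SVM' branch of Example 9 concrete, here is a hedged usage sketch with hypothetical toy data. It assumes buildModel and its imports are in scope, that the DataFrame's last column is the binary label 'UpDown' as the function expects, and that C and gamma are passed through the parameters list:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
features = rng.randn(200, 3)
dataset = pd.DataFrame(features, columns=['f1', 'f2', 'f3'])
dataset['UpDown'] = (features[:, 0] + features[:, 1] > 0).astype(int)

model = buildModel(dataset, method='SVM', parameters=[1.0, 0.1])  # C=1.0, gamma=0.1
print(model.score(dataset[dataset.columns[0:-1]], dataset['UpDown']))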

Example 10: sk_svm_train

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def sk_svm_train(intr, labeltr, inte, labelte, kener):
    clf = svm.SVC(kernel=kener)
    # Start training
    clf.fit(intr, labeltr)
    # Identifier used for plotting
    figsign = kener
    # Training accuracy
    acc_train = clf.score(intr, labeltr)
    # Test accuracy
    acc_test = clf.score(inte, labelte)
    # Number of support vectors
    vec_count = sum(clf.n_support_)
    # Support vectors
    vectors = clf.support_vectors_

    return acc_train, acc_test, vec_count, vectors, figsign


# Function that prints the results
Author: Anfany, Project: Machine-Learning-for-Beginner-by-Python3, Lines of code: 21, Source: Sklearn_Classify_SVM.py
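
A hedged usage sketch for Example 10, written for this article: split a standard dataset, then call sk_svm_train with any kernel name accepted by SVC, e.g. 'linear' or 'rbf'.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)

acc_train, acc_test, vec_count, vectors, figsign = sk_svm_train(X_tr, y_tr, X_te, y_te, 'rbf')
print(figsign, acc_train, acc_test, vec_count)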

Example 11: _prepare_classifier

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def _prepare_classifier(self, params, n_jobs=1):

        X_train, y_train = params

        tuned_parameters = [{
            'kernel': ['rbf'], 
            'gamma': [1e-4,1e-3,1e-2,1e-1,1e+0,1e+1,1e+2,1e+3,1e+4],
            'C': [1e+0,1e+1,1e+2,1e+3,1e+4,1e+5,1e+6,1e+7,1e+8,1e+9]
        }]

        clf=RandomizedSearchCV(svm.SVC(random_state=self.random_state), 
                               tuned_parameters[0], 
                               n_iter=self.n_randomized_search_iter, 
                               n_jobs=n_jobs, random_state=self.random_state)
        clf.fit(X_train, y_train)
              
        params=clf.best_params_
        clf=svm.SVC(kernel=params['kernel'], C=params['C'], 
            gamma=params['gamma'], probability=True, 
            random_state=self.random_state)
        clf.fit(X_train, y_train)

        return clf 
Author: daniellerch, Project: aletheia, Lines of code: 25, Source: models.py

Example 12: SVM

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def SVM():
    '''data1 -- linearly separable case'''
    data1 = spio.loadmat('data1.mat')
    X = data1['X']
    y = data1['y']
    y = np.ravel(y)
    plot_data(X, y)

    model = svm.SVC(C=1.0, kernel='linear').fit(X, y)  # use a linear kernel
    plot_decisionBoundary(X, y, model)  # plot the decision boundary
    '''data2 -- non-linearly separable case'''
    data2 = spio.loadmat('data2.mat')
    X = data2['X']
    y = data2['y']
    y = np.ravel(y)
    plt = plot_data(X, y)
    plt.show()

    model = svm.SVC(gamma=100).fit(X, y)  # gamma is the kernel coefficient; larger values fit the training data more tightly
    plot_decisionBoundary(X, y, model, class_='notLinear')  # plot the decision boundary


# Plotting helpers
Author: lawlite19, Project: MachineLearning_Python, Lines of code: 25, Source: SVM_scikit-learn.py

Example 13: run_svms

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def run_svms():
    svm_training_data, svm_validation_data, svm_test_data \
        = mnist_loader.load_data()
    accuracies = []
    for size in SIZES:
        print("\n\nTraining SVM with data set size %s" % size)
        clf = svm.SVC()
        clf.fit(svm_training_data[0][:size], svm_training_data[1][:size])
        predictions = [int(a) for a in clf.predict(svm_validation_data[0])]
        accuracy = sum(int(a == y) for a, y in
                       zip(predictions, svm_validation_data[1])) / 100.0
        print("Accuracy was %s percent" % accuracy)
        accuracies.append(accuracy)
    f = open("more_data_svm.json", "w")
    json.dump(accuracies, f)
    f.close()
Author: dalmia, Project: WannaPark, Lines of code: 18, Source: more_data.py

Example 14: __call__

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def __call__(self, estimator):
        fitted_estimator = estimator.fit(self.X_train, self.y_train)

        if isinstance(estimator, (LinearClassifierMixin, SVC, NuSVC,
                                  LightBaseClassifier)):
            y_pred = estimator.decision_function(self.X_test)
        elif isinstance(estimator, DecisionTreeClassifier):
            y_pred = estimator.predict_proba(self.X_test.astype(np.float32))
        elif isinstance(
                estimator,
                (ForestClassifier, XGBClassifier, LGBMClassifier)):
            y_pred = estimator.predict_proba(self.X_test)
        else:
            y_pred = estimator.predict(self.X_test)

        return self.X_test, y_pred, fitted_estimator 
Author: BayesWitnesses, Project: m2cgen, Lines of code: 18, Source: utils.py

Example 15: test_linear_kernel

# Required import: from sklearn import svm [as an alias]
# Or: from sklearn.svm import SVC [as an alias]
def test_linear_kernel():
    estimator = svm.SVC(kernel="linear", random_state=1)

    estimator.fit([[1], [2]], [1, 2])

    assembler = assemblers.SklearnSVMModelAssembler(estimator)
    actual = assembler.assemble()

    def kernel_ast(sup_vec_value):
        return ast.BinNumExpr(
            ast.NumVal(sup_vec_value),
            ast.FeatureRef(0),
            ast.BinNumOpType.MUL)

    expected = _create_expected_single_output_ast(
        estimator.dual_coef_, estimator.intercept_,
        [kernel_ast(1.0), kernel_ast(2.0)])

    assert utils.cmp_exprs(actual, expected) 
Author: BayesWitnesses, Project: m2cgen, Lines of code: 21, Source: test_svm.py
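
Examples 14 and 15 come from m2cgen's test suite; the library transpiles fitted models into plain code. As a hedged end-to-end sketch written for this article (assuming m2cgen is installed; export_to_python is part of its documented public API), a fitted linear-kernel SVC can be exported like this:

from sklearn.svm import SVC
import m2cgen as m2c  # assumes the m2cgen package is installed

estimator = SVC(kernel="linear", random_state=1)
estimator.fit([[1], [2]], [1, 2])

# Generate pure-Python scoring code for the fitted model.
code = m2c.export_to_python(estimator)
print(code)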


Note: The sklearn.svm.SVC examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce this article without permission.