当前位置: 首页>>代码示例>>Python>>正文


Python SVC.set_params方法代码示例

本文整理汇总了Python中sklearn.svm.SVC.set_params方法的典型用法代码示例。如果您正苦于以下问题:Python SVC.set_params方法的具体用法?Python SVC.set_params怎么用?Python SVC.set_params使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.svm.SVC的用法示例。


在下文中一共展示了SVC.set_params方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: predict_all_stock

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def predict_all_stock():
    """Predict every instrument with its pre-tuned SVC parameters and features.

    Reads the per-instrument best (C, gamma, window_size, feature list) from a
    CSV, runs predict_history for each instrument, and writes a summary CSV.

    :return: DataFrame with columns security_id / window_size / accuracy
             (accuracy is None for instruments whose prediction failed).
    """
    params = load_best_params_and_features('./all_instruments_best_features_and_params.csv')
    clf = SVC()
    start_date = '2015-06-01'
    end_date = str(datetime.now().date())
    ret = []
    for idx, param in params.iterrows():
        window_size = param.window_size
        clf.set_params(C=param.C, gamma=param.gamma)
        # feature columns hold 'name#factor' strings; split them apart
        feature_names = param['feature1': 'feature5'].values
        feature_names = [item.split('#') for item in feature_names]
        security_id = param.security_id
        logger.info('starting predict security id {}'.format(security_id))
        # NOTE(review): eval() on CSV-stored factor expressions is only safe
        # for a trusted file; a name->factor lookup table would be safer.
        ta_factors = [(t[0], eval(t[1])) for t in feature_names if len(t) > 1]
        # BUG FIX: accuracy was undefined (first failure) or stale from the
        # previous instrument (later failures) when predict_history raised
        # ValueError and the except clause silently passed; default it here.
        accuracy = None
        try:
            accuracy = predict_history(clf=clf, start_date=start_date,
                                       end_date=end_date,
                                       security_id=security_id,
                                       list_ta=ta_factors,
                                       window_size=window_size)
        except ValueError:
            logger.exception('predict_history failed for {}'.format(security_id))

        ret.append(dict(security_id=security_id, window_size=window_size, accuracy=accuracy))
    df = pd.DataFrame(ret)

    FILE = 'total_instrument_accuracy(param_selected_by_test_mean_accuracy)' + \
    '_from_' + start_date + "_to_" + end_date + '.csv'
    df.to_csv(FILE)
    return df
开发者ID:FayolChang,项目名称:mlp,代码行数:32,代码来源:find_n_and_factors_for_hs300_components.py

示例2: run

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def run(dataset):

    train_X, train_y = dataset['train']
    dev_X, dev_y = dataset['dev']
    test_X, test_y = dataset['test']

    # param tuning

    param_grid = [
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['linear'],
            'gamma': ['auto']
            },
        {
            'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],
            'kernel': ['rbf'],
            'gamma': ['auto', 0.001, 0.01, 0.1, 1]
            }
        ]

    best_params = {}
    best_accuracy = 0

    clf = SVC(verbose=False)
    for d in param_grid:
        keys = d.keys()
        for v1 in d[keys[0]]:
            for v2 in d[keys[1]]:
                for v3 in d[keys[2]]:
                    params = {keys[0]: v1, keys[1]: v2, keys[2]: v3}
                    print 'Params:', params
                    clf.set_params(**params)
                    clf.fit(train_X, train_y)
                    acc_test = clf.score(dev_X, dev_y)
                    acc_train = clf.score(train_X, train_y)
                    print 'Train Acc:', acc_train
                    print 'Dev Acc:', acc_test
                    if acc_test > best_accuracy:
                        best_accuracy = acc_test
                        best_params = params
    clf.set_params(**best_params)
    clf.fit(train_X, train_y)
    print best_params
    print 'Predicting...'
    predict_y = clf.predict(train_X)
    Acc, MCC = score(train_y, predict_y)
    print 'Training Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))

    predict_y = clf.predict(dev_X)
    Acc, MCC = score(dev_y, predict_y)
    print 'Development Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))

    predict_y = clf.predict(test_X)
    Acc, MCC = score(test_y, predict_y)
    print 'Test Data Eval:'
    print 'Acc: {}%\tMCC: {}%'.format(round(Acc*100, 2), round(MCC*100, 2))
开发者ID:Shuailong,项目名称:StockPrediction,代码行数:61,代码来源:svm_train.py

示例3: trainModel

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def trainModel(K, Y, C):
    """Train and return an SVM on a precomputed kernel (Gram) matrix.

    :param K: precomputed kernel matrix, shape (n_samples, n_samples)
    :param Y: training labels
    :param C: SVM penalty parameter
    :return: fitted SVC
    """
    # class_weight="auto" was deprecated and removed in scikit-learn
    # (0.17+); "balanced" is the supported replacement heuristic.
    clf = SVC(class_weight="balanced", kernel="precomputed")
    clf.set_params(C=C)
    clf.fit(K, Y)

    return clf
开发者ID:kldykstra,项目名称:webtool-SVM,代码行数:11,代码来源:trainSVM.py

示例4: refitting_updating_test1

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def refitting_updating_test1():
    """Demonstrate re-configuring one SVC via set_params() and refitting it."""
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)            # 100 samples x 10 features
    y = rng.binomial(1, 0.5, 100)    # random binary labels
    X_test = rng.rand(5, 10)         # 5 query points
    clf = SVC()
    # Same estimator instance, two different kernels.
    for kernel_name in ('linear', 'rbf'):
        clf.set_params(kernel=kernel_name).fit(X, y)
        print(clf.predict(X_test))
开发者ID:JoshuaMichaelKing,项目名称:MyLearning,代码行数:14,代码来源:scikit-sample.py

示例5: autoRefittingParameters

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def autoRefittingParameters():
    """
    Refitting and updating parameters: the same SVC instance can be
    re-configured through set_params() and fitted again.
    :return: None (predictions are computed and discarded)
    """
    import numpy as np
    from sklearn.svm import SVC

    rng = np.random.RandomState(0)
    X, y = rng.rand(100, 10), rng.binomial(1, 0.5, 100)
    X_test = rng.rand(5, 10)

    clf = SVC()
    for kernel_name in ('linear', 'rbf'):
        clf.set_params(kernel=kernel_name).fit(X, y)
        clf.predict(X_test)
开发者ID:avain,项目名称:machineLearning,代码行数:19,代码来源:snippy.py

示例6: __predict

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def __predict(data_source, end_date, get_config_func, index, look_back,
              predict_type, start_date, ticks):
    """Run per-tick SVC predictions between start_date and end_date.

    :return: list of DataFrames (one per tick) with columns
             'prediction', 'fact' and 'tick'.
    """
    # 1. fetch the raw price data
    ins = GetPriceData(sid=ticks, index=index, data_source=data_source)
    data = ins.batch_get_data(look_back, start_date, end_date)
    # split the single long frame into one frame per tick
    data = [(tick, dta.drop(['tick'], axis=1)) for tick, dta in
            data.groupby(['tick'])]
    # TODO: read the config first, then generate the features
    # NOTE(review): each element of `data` is a (tick, frame) tuple --
    # confirm generate_features_and_labels expects that shape.
    features_and_lables = [generate_features_and_labels(dta,
                                                        const.ta_factors,
                                                        predict_type)
                           for dta in data]
    data_for_pred = [
        generate_feature_and_params_from_config(x, get_config_func) for x in
        features_and_lables]
    results = []
    clf = SVC()
    for tick, X, y, window_size, clf_params in data_for_pred:
        try:
            data_gen = generate_data(
                X,
                y,
                window_size=window_size,
                start_date=start_date,
                end_date=end_date)
            clf.set_params(**clf_params)
        except LengthError as e:  # `as` syntax works on Python 2.6+ and 3
            logger.critical('tick = {}'.format(tick))
            logger.exception(e)
            continue  # BUG FIX: data_gen is undefined below when this fires
        preds = batch_predict(clf, data_gen, probability=False)
        preds_list = list(preds)
        preds_idx = trade_cal.loc[start_date: end_date].index
        preds = pd.Series(preds_list, index=preds_idx)
        # .ix was removed from pandas; .loc is the label-based equivalent here
        facts = y.shift(1).loc[start_date: end_date]
        df = pd.DataFrame({'prediction': preds, 'fact': facts})
        df.loc[:, 'tick'] = tick
        results.append(df)
    return results  # BUG FIX: results were collected but never returned
开发者ID:FayolChang,项目名称:mlp,代码行数:40,代码来源:workflow.py

示例7: main

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def main():
    """Grid-search C for a linear SVC on two 20newsgroups categories and
    write the ten highest-weighted terms to a submission file."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    newsgroups = fetch_20newsgroups(subset='all',
                                    categories=['alt.atheism', 'sci.space'])

    vectorizer = TfidfVectorizer()
    # FIX: the original bound the targets to `X` and the documents to `y`;
    # renamed for clarity -- the computation is unchanged.
    texts = newsgroups.data
    labels = newsgroups.target
    X_train = vectorizer.fit_transform(texts)
    grid = {'C': np.power(10.0, np.arange(-5, 6))}
    cv = KFold(X_train.shape[0], n_folds=5, shuffle=True, random_state=241)
    clf = SVC(kernel='linear', random_state=241)
    gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)

    gs.fit(X_train, labels)
    # refit a fresh model with the best C found by the grid search
    clf.set_params(**gs.best_params_)
    clf.fit(X_train, labels)
    result = show_top10(clf, vectorizer)
    result.sort()
    write_submission(str(
            [x for x in result]).lower().encode('ascii', 'ignore'),
        '71')  # still need some work to get rid of unicode problem
开发者ID:ToxaZ,项目名称:coursera_ml,代码行数:25,代码来源:svm-text.py

示例8: dataset3_params

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
def dataset3_params(X, y, Xval, yval):
    """Grid-search C and sigma for an RBF SVC against a validation set.

    gamma is derived from sigma as 1 / (2 * sigma^2), matching the Gaussian
    kernel parameterisation of the original exercise.

    :return: (best_C, best_sigma)
    """
    candidates = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30]

    lowest_error = sys.maxsize
    best_c = 0
    best_sigma = 0

    clf = SVC(kernel='rbf')

    for c in candidates:
        for s in candidates:
            clf = clf.set_params(C=c, gamma=1 / (2 * s * s))
            clf.fit(X, y.ravel())
            predictions = clf.predict(Xval)
            validation_error = np.mean(predictions.reshape(-1, 1) != yval)
            # <= keeps the last combination among ties, as the original did
            if validation_error <= lowest_error:
                lowest_error = validation_error
                best_c, best_sigma = c, s
    return best_c, best_sigma
开发者ID:wchen342,项目名称:cs229_hw,代码行数:24,代码来源:ex6.py

示例9: DualSvm

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
class DualSvm(object):
    """
    This is is the implementation of a combined Support Vector classifier.
    The goal is to trade accuracy for speed by giving the 'hardest' points to the second classifier.
    The combined classifier consists of a linearSVC classifier (less accurate) and a SVC classifier with RBF-Kernel (more accurate).

    The user has to set a trade-off parameter k, which determines how many points percentage-wise are given to the second classifier.
    The points are chosen according to their distance to the hyperplane of the linear classifier.

    """

    def __init__(self, use_distance=True, c_lin=0.001, c_gauss=10, gamma=0.01, k=0, verbose=True):
        """Build the combined linear / RBF classifier pair.

        :param use_distance: route points by their distance to the linear hyperplane.
        :param c_lin:   C penalty of the linear support vector machine.
        :param c_gauss: C penalty of the gaussian (RBF) support vector machine.
        :param gamma:   RBF kernel coefficient of the gaussian SVM.
        :param k:       fraction in [0, 1] of the closest points (by margin)
                        handed to the gaussian SVM.
        :param verbose: write debug events into debug.txt.
        """
        self._use_distance = use_distance

        # hyper-parameters
        self._c_lin = c_lin
        self._c_gauss = c_gauss
        self._gamma = gamma
        self._k = k

        # bookkeeping, filled in later
        self._n_gauss = -1
        self._n_lin = -1
        self._verbose = verbose

        # the two underlying estimators
        self._lin_svc = LinearSVC(C=c_lin)
        self._gauss_svc = SVC(C=c_gauss, kernel="rbf", gamma=gamma)
        self._gauss_distance = 0

    # region Getters and Setters
    @property
    def c_lin(self):
        """
        The C parameter for the linear SVM.
        """
        return self._c_lin

    @c_lin.setter
    def c_lin(self, value):
        # Keep the cached value and the underlying LinearSVC in sync.
        self._c_lin = value
        self._lin_svc.set_params(C=value)

    @property
    def c_gauss(self):
        """
        The C parameter for the gauss SVM.
        """
        return self._c_gauss

    @c_gauss.setter
    def c_gauss(self, value):
        # Keep the cached value and the underlying RBF SVC in sync.
        self._c_gauss = value
        self._gauss_svc.set_params(C=value)

    @property
    def gamma(self):
        """
        The gamma parameter for the gauss SVM.
        """
        return self._gamma

    @gamma.setter
    def gamma(self, value):
        # Keep the cached value and the underlying RBF SVC in sync.
        self._gamma = value
        self._gauss_svc.set_params(gamma=value)

    @property
    def k(self):
        """
        The percentage of points that should be given to the second classifier.
        """
        return self._k

    @property
    def time_fit_lin(self):
        # Fit time of the linear SVM; presumably recorded during fit() -- not shown here.
        return self._time_fit_lin

    @property
    def time_fit_gauss(self):
        # Fit time of the gaussian SVM; presumably recorded during fit() -- not shown here.
        return self._time_fit_gauss

    @property
    def time_overhead(self):
        # Overhead time (e.g. splitting the points); presumably recorded during fit().
        return self._time_overhead

    @property
    def time_predict(self):
        # Time of the last predict call; presumably recorded during predict().
        return self._time_predict

    @k.setter
#.........这里部分代码省略.........
开发者ID:Nerolex,项目名称:garrulous-woof,代码行数:103,代码来源:Classifier.py

示例10: TfidfVectorizer

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
# Load the two-category 20newsgroups subset and TF-IDF encode it.
newsgroups = datasets.fetch_20newsgroups(
    subset='all',
    categories=['alt.atheism', 'sci.space'])

tfidf = TfidfVectorizer()
X = tfidf.fit_transform(newsgroups.data)
y = newsgroups.target

# Grid-search C for a linear SVC with 5-fold cross-validation.
grid = {'C': np.power(10.0, np.arange(-5, 6))}
cv = KFold(y.size, n_folds=5, shuffle=True, random_state=241)
clf = SVC(kernel='linear', random_state=241)
gs = GridSearchCV(clf, grid, scoring='accuracy', cv=cv)
gs.fit(X, y)

# Keep the first C whose mean validation score strictly beats the running best.
best_score, best_C = 0, None
for entry in gs.grid_scores_:
    mean_score = entry.mean_validation_score
    if mean_score > best_score:
        best_score, best_C = mean_score, entry.parameters['C']

# Refit with the winning C on the full data set.
clf.set_params(C = best_C)
clf.fit(X, y)

# The ten largest absolute linear-SVM weights mark the most informative terms.
top_idx = np.argsort(np.absolute(np.asarray(clf.coef_.todense())).reshape(-1))[-10:]

top_words = [tfidf.get_feature_names()[i] for i in top_idx]

with open("q1.txt", "w") as output:
    output.write('%s' % (" ".join(sorted(top_words))))
开发者ID:dstarcev,项目名称:coursera-machine-learning-yandex,代码行数:32,代码来源:main.py

示例11: loadDataHelper

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]

if __name__ == '__main__':
    # Load feature matrices: labels for training, ids for the test rows.
    train_x, train_y = loadDataHelper('train_data.txt')
    test_x, test_id = loadDataHelper('test_data.txt')
    print('train size: %d %d' % (len(train_x), len(train_y)))
    print('test size: %d %d' % (len(test_x), len(test_id)))

    # Fit a linear-kernel SVM. (LogisticRegression, GaussianNB,
    # KNeighborsClassifier, DecisionTreeClassifier and LinearSVC were the
    # alternatives tried in earlier revisions.)
    model = SVC()
    model.set_params(kernel='linear').fit(train_x, train_y)
    print(model)

    # Predict the test set and persist the submission file.
    predicted = model.predict(test_x)
    saveResult('result-linearsvm.csv', test_id, predicted)
开发者ID:liaoyt,项目名称:sf_crime_classification,代码行数:31,代码来源:doit.py

示例12: SVC

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
import numpy as np
from sklearn.svm import SVC

# Deterministic toy data: 100 training points in 10-D with random binary labels.
rng = np.random.RandomState(0)
X, y = rng.rand(100, 10), rng.binomial(1, 0.5, 100)
X_test = rng.rand(5, 10)

# One estimator instance, re-configured and refit with two kernels.
clf = SVC()
for kernel_name in ('linear', 'rbf'):
    clf.set_params(kernel=kernel_name).fit(X, y)
    print(clf.predict(X_test))
开发者ID:imaculate,项目名称:scikit-learn-tutorials,代码行数:17,代码来源:SetParams.py

示例13: RBFScanner

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
class RBFScanner(BaseScanner):


    def __init__(self, X, y, C_lims=(-12,12), gamma_lims=(-12,12), n_steps=50, n_iters=20, logvals=True, class_names=None, seed=None):
        """Configure an RBF-kernel SVC scanner and immediately run the scan.

        C_lims / gamma_lims are interpreted as base-10 exponents when
        logvals is True (see scan()).
        """
        BaseScanner.__init__(self, X, y, class_names, n_steps, n_iters, seed)
        # NOTE(review): self.max_solver_iters is presumably set by
        # BaseScanner.__init__ -- not visible here, confirm.
        self.clf = SVC(kernel='rbf', decision_function_shape='ovr', max_iter=self.max_solver_iters)
        self.scan(logvals, C_lims, gamma_lims)


    def scan(self, logvals=True, C_lims=None, gamma_lims=None):
        """Grid-scan (C, gamma) for the RBF SVC over repeated random splits.

        Fills self.accs with shape (n_steps, n_steps, n_classes + 3):
        [..., 0] test accuracy, [..., 1] train accuracy,
        [..., 2] train - test gap, [..., 3:] per-class support-weighted F1.

        :param logvals: interpret the limit tuples as base-10 exponents.
        :param C_lims: (low, high) for C; None keeps the previous grid.
        :param gamma_lims: (low, high) for gamma; None keeps the previous grid.
        """

        self.logvals = logvals

        if C_lims:
            self.C_vals = np.linspace(C_lims[0], C_lims[1], self.n_steps)
            if self.logvals:
                self.C_vals = 10**self.C_vals

        if gamma_lims:
            self.gamma_vals = np.linspace(gamma_lims[0], gamma_lims[1], self.n_steps)
            if self.logvals:
                self.gamma_vals = 10**self.gamma_vals

        # Axis values and labels for the heatmap plots.
        if self.logvals:
            x_vals = np.log10(self.gamma_vals)
            y_vals = np.log10(self.C_vals)
            y_label = 'Log C'
            x_label = 'Log Gamma'
        else:
            x_vals = self.gamma_vals
            y_vals = self.C_vals
            y_label = 'C'
            x_label = 'Gamma'

        self.plot_params.update({
            'x_vals'  : x_vals,
            'y_vals'  : y_vals,
            'x_label' : x_label,
            'y_label' : y_label,
        })

        self.accs = np.zeros((self.n_steps, self.n_steps, self.n_classes+3))

        scan_seed = self.seeds.pop()
        randgen = np.random.RandomState(seed=scan_seed)
        split_seeds = randgen.randint(0, int(1e6), int(self.n_iters * (self.n_steps ** 2))).tolist()

        for j, C_val_j in tqdm(enumerate(self.C_vals), total=self.n_steps, desc='Scanning progress'):
            for i, gamma_val_i in enumerate(self.gamma_vals):
                scores = []
                scores_train = []
                f1_arr      = np.zeros((self.n_iters, self.n_classes), dtype=np.float64)
                # BUG FIX: support_arr was dtype=np.int8 -- per-class support
                # counts in a 33% test split overflow int8 (max 127) and
                # silently corrupt the weighted F1 average computed below.
                support_arr = np.zeros((self.n_iters, self.n_classes), dtype=np.int64)
                # Hoisted out of the split loop: the params do not change per split.
                self.clf.set_params(C=C_val_j, gamma=gamma_val_i)
                for n in range(self.n_iters):
                    split_seed = split_seeds.pop()
                    X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=0.33, random_state=split_seed)
                    self.clf.fit(X_train, y_train)
                    y_pred = self.clf.predict(X_test)
                    matches = y_pred == y_test
                    scores.append(matches.sum() / matches.size)
                    f1s, support = precision_recall_fscore_support(y_test, y_pred)[2:]
                    f1_arr[n,:] = f1s
                    support_arr[n,:] = support
                    y_pred = self.clf.predict(X_train)
                    matches = y_pred == y_train
                    scores_train.append(matches.sum() / matches.size)

                # Support-weighted F1, averaged over the iterations.
                f1_avg = (f1_arr * support_arr).sum(axis=0) / support_arr.sum(axis=0)
                self.accs[i,j,3:] = f1_avg
                self.accs[i,j,0] = np.array(scores).mean()
                self.accs[i,j,1] = np.array(scores_train).mean()

        # Generalisation gap per grid cell.
        self.accs[:,:,2] = self.accs[:,:,1] - self.accs[:,:,0]
        self._find_optimals()


    def show_train_test(self, n_cols=3, dims=(300,300), v_lims=(0.0, 1.0), save_string=None):
        """Plot overall accuracy, training accuracy and their difference.

        Stores the layout options in self.plot_params, then delegates the
        actual rendering to self._make_multiplot().
        """
        self.plot_params.update({
            'plot_titles' : ['Overall Accuracy', 'Training Accuracy', 'Difference'],
            'n_cols'      : n_cols,
            'dims'        : dims,
            'v_lims'      : v_lims,
            'save_string' : save_string
        })
        return self._make_multiplot()


#.........这里部分代码省略.........
开发者ID:thomasbkahn,项目名称:svc-opt,代码行数:103,代码来源:scanners.py

示例14: SVC

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
import numpy as np
from sklearn.svm import SVC

# Fixed-seed toy data so the demo is reproducible.
rng = np.random.RandomState(0)
X = rng.rand(100, 10)
y = rng.binomial(1, 0.5, 100)
X_test = rng.rand(5, 10)

# set_params() returns the estimator, so configuring and fitting chain.
clf = SVC()
for kernel_name in ('linear', 'rbf'):
    clf.set_params(kernel=kernel_name).fit(X, y)
    print(clf.predict(X_test))

示例15: SVMPlotter

# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import set_params [as 别名]
class SVMPlotter(object):

    def __init__(self, figsize=(9,7), point_size=95):
        """Set up the plot defaults (figure size, marker size, palette) and
        the linear SVC used for every fit."""
        self.figsize = figsize
        self.point_size = point_size

        # Tableau-style palette keyed by colour name.
        self.colors = {
            'blue':   '#1F77B4',
            'orange': '#FF7F0E',
            'green':  '#2CA02C',
            'red':    '#D62728',
            'purple': '#9467BD',
            'brown':  '#8C564B',
            'pink':   '#E377C2',
            'grey':   '#7F7F7F',
            'yellow': '#BCBD22',
            'teal':   '#17BECF',
        }

        self.model = SVC(kernel='linear', C=1.0)


    def initialize_data(self, class_n=20):
        # Build the demo dataset container.
        # NOTE(review): XY is a project-local class not shown here;
        # presumably it generates class_n points per class -- confirm.
        self.class_n = class_n
        self.XY_container = XY(class_n)


    def matrix_ranges(self, X):
        """Return padded axis limits and spans for a 2-column data matrix.

        Each axis is padded by half its column standard deviation.

        :return: (x1_min, x1_max, x1_range, x2_min, x2_max, x2_range)
        """
        half_std = np.std(X, axis=0) / 2
        x1_min, x2_min = np.amin(X, axis=0) - half_std
        x1_max, x2_max = np.amax(X, axis=0) + half_std

        return (x1_min, x1_max, x1_max - x1_min,
                x2_min, x2_max, x2_max - x2_min)


    def fit_svm(self, X, y, C=1.0):
        """Fit the linear SVC with penalty C and return the margin width.

        The margin of a linear SVM is 1 / ||w||, with w the weight vector
        (see the scikit-learn plot_svm_margin example).
        """
        self.model.set_params(C=C)
        self.model.fit(X, y)
        # Frobenius norm of the (1, n_features) coef matrix == sqrt(sum(w**2))
        return 1. / np.linalg.norm(self.model.coef_)


    def calculate_hyperplane_margin(self, x1_min, x1_max, margin):
        """Compute the decision boundary and its margin lines over [x1_min, x1_max].

        :return: (xx, yy, yy_down, yy_up) -- x samples, boundary y values,
                 lower margin line and upper margin line.
        """
        w = self.model.coef_[0]
        slope = -w[0] / w[1]

        xx = np.linspace(x1_min, x1_max)

        # The SVM intercept is not a regression intercept: the boundary
        # satisfies w0*x + w1*y + b = 0, hence the division by w[1].
        yy = slope * xx - self.model.intercept_[0] / w[1]

        offset = slope * margin
        yy_down = yy + offset
        yy_up = yy - offset

        return xx, yy, yy_down, yy_up


    def svm_plot(self, X, y, C=1.0):

        x1_min, x1_max, x1_r, x2_min, x2_max, x2_r = self.matrix_ranges(X)

        margin = self.fit_svm(X, y, C=C)

        xx, yy, yy_down, yy_up = self.calculate_hyperplane_margin(x1_min, x1_max, margin)

        # Set the figure size to be big enough to see stuff
        plt.figure(figsize=self.figsize)

        # plot the line, the points, and the nearest vectors to the plane
        plt.plot(xx, yy, 'k-', lw=4)
        plt.plot(xx, yy_down, 'k--', lw=1.5, color=self.colors['yellow'])
        plt.plot(xx, yy_up, 'k--', lw=1.5, color=self.colors['yellow'])

        plt.scatter(self.model.support_vectors_[:, 0], self.model.support_vectors_[:, 1],
                    s=self.point_size*4, facecolors='none', edgecolors=self.colors['grey'],
                    lw=1.5)

        #plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, s=self.point_size)
        plt.scatter(X[y==1, 0], X[y==1, 1], color=self.colors['blue'],
                    s=self.point_size, label='female')
        plt.scatter(X[y==0, 0], X[y==0, 1], color=self.colors['red'],
                    s=self.point_size, label='male')

        # set the axis limits:
        plt.xlim(x1_min, x1_max)
        plt.ylim(x2_min, x2_max)

        plt.xlabel('weight (lb)', fontsize=16)
        plt.ylabel('height (in)', fontsize=16)

        plt.title('male vs. female by height and weight\n')
        plt.legend(loc="lower right")

        plt.tick_params(labelsize=14)
#.........这里部分代码省略.........
开发者ID:akodate,项目名称:DSI-SF-2-akodate,代码行数:103,代码来源:svm_plotter.py


注:本文中的sklearn.svm.SVC.set_params方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。