Python naive_bayes.GaussianNB Code Examples

This article collects typical usage examples of the sklearn.naive_bayes.GaussianNB class in Python. If you are looking for concrete examples of how GaussianNB is used in practice, the curated code samples below may help. You can also explore other usage examples from the sklearn.naive_bayes module.


The following 15 code examples of naive_bayes.GaussianNB are shown below, sorted by popularity.
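
Before the collected examples, here is a minimal, self-contained sketch of the typical GaussianNB workflow (fit, score, predict class probabilities). The choice of the Iris toy dataset and the variable names are illustrative assumptions and are not taken from any of the examples below.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB

# Load a small toy dataset and hold out 20% for testing.
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Fit a Gaussian Naive Bayes classifier and evaluate it.
clf = GaussianNB()
clf.fit(X_train, y_train)
print("test accuracy:", clf.score(X_test, y_test))            # mean accuracy on the held-out split
print("class probabilities:", clf.predict_proba(X_test[:3]))  # per-class probabilities for 3 samples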

Example 1: test_naivebayes_breastcancer_cont

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_naivebayes_breastcancer_cont(self):
        # python -m unittest tests_classification.Tests_Classification.test_naivebayes_breastcancer_cont
        from sklearn.naive_bayes import GaussianNB
        from discomll.classification import naivebayes

        x_train, y_train, x_test, y_test = datasets.breastcancer_cont(replication=1)
        train_data, test_data = datasets.breastcancer_cont_discomll(replication=1)

        clf = GaussianNB()
        probs_log1 = clf.fit(x_train, y_train).predict_proba(x_test)

        fitmodel_url = naivebayes.fit(train_data)
        prediction_url = naivebayes.predict(test_data, fitmodel_url)
        probs_log2 = [v[1] for _, v in result_iterator(prediction_url)]

        self.assertTrue(np.allclose(probs_log1, probs_log2, atol=1e-8)) 
Author: romanorac | Project: discomll | Lines: 18 | Source: tests_classification.py

Example 2: test_different_results

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_different_results(self):
        from sklearn.naive_bayes import GaussianNB as sk_nb
        from sklearn import datasets

        global_seed(12345)
        dataset = datasets.load_iris()

        x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=.2)

        bounds = ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5])

        clf_dp = GaussianNB(epsilon=1.0, bounds=bounds)
        clf_non_private = sk_nb()

        for clf in [clf_dp, clf_non_private]:
            clf.fit(x_train, y_train)

        same_prediction = clf_dp.predict(x_test) == clf_non_private.predict(x_test)

        self.assertFalse(np.all(same_prediction)) 
Author: IBM | Project: differential-privacy-library | Lines: 22 | Source: test_GaussianNB.py

Example 3: test_22_gaussian_nb

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_22_gaussian_nb(self):
        print("\ntest 22 (GaussianNB without preprocessing) [binary-class]\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_binary_classification()

        model = GaussianNB()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test22sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        model_prob = pipeline_obj.predict_proba(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
        self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True) 
Author: nyoka-pmml | Project: nyoka | Lines: 20 | Source: testScoreWithAdapaSklearn.py

Example 4: test_23_gaussian_nb

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_23_gaussian_nb(self):
        print("\ntest 23 (GaussianNB without preprocessing) [multi-class]\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()

        model = GaussianNB()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test23sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        model_prob = pipeline_obj.predict_proba(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
        self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True) 
Author: nyoka-pmml | Project: nyoka | Lines: 20 | Source: testScoreWithAdapaSklearn.py

Example 5: test_24_gaussian_nb

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_24_gaussian_nb(self):
        print("\ntest 24 (GaussianNB with preprocessing) [multi-class]\n")
        X, X_test, y, features, target, test_file = self.data_utility.get_data_for_multi_class_classification()

        model = GaussianNB()
        pipeline_obj = Pipeline([
            ('scaler', StandardScaler()),
            ("model", model)
        ])
        pipeline_obj.fit(X,y)
        file_name = 'test24sklearn.pmml'
        
        skl_to_pmml(pipeline_obj, features, target, file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, test_file)
        model_pred = pipeline_obj.predict(X_test)
        model_prob = pipeline_obj.predict_proba(X_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
        self.assertEqual(self.adapa_utility.compare_probability(probabilities, model_prob), True) 
Author: nyoka-pmml | Project: nyoka | Lines: 21 | Source: testScoreWithAdapaSklearn.py

Example 6: __init__

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def __init__(self, classifier=FaceClassifierModels.DEFAULT):
        self._clf = None
        if classifier == FaceClassifierModels.LINEAR_SVM:
            self._clf = SVC(C=1.0, kernel="linear", probability=True)
        elif classifier == FaceClassifierModels.NAIVE_BAYES:
            self._clf = GaussianNB()
        elif classifier == FaceClassifierModels.RBF_SVM:
            self._clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
        elif classifier == FaceClassifierModels.NEAREST_NEIGHBORS:
            self._clf = KNeighborsClassifier(1)
        elif classifier == FaceClassifierModels.DECISION_TREE:
            self._clf = DecisionTreeClassifier(max_depth=5)
        elif classifier == FaceClassifierModels.RANDOM_FOREST:
            self._clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)
        elif classifier == FaceClassifierModels.NEURAL_NET:
            self._clf = MLPClassifier(alpha=1)
        elif classifier == FaceClassifierModels.ADABOOST:
            self._clf = AdaBoostClassifier()
        elif classifier == FaceClassifierModels.QDA:
            self._clf = QuadraticDiscriminantAnalysis()
        print("classifier={}".format(FaceClassifierModels(classifier))) 
Author: richmondu | Project: libfaceid | Lines: 23 | Source: classifier.py

Example 7: getModels

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Rigid")
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result 
Author: tech-quantum | Project: sia-cog | Lines: 25 | Source: scikitlearn.py

Example 8: Faceidentifier

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def Faceidentifier(trainDataSimplified, trainLabel, testDataSimplified, testLabel):  # three different kinds of classifiers
    print("=====================================")    
    print("GaussianNB")
    clf1 = GaussianNB()
    clf1.fit(trainDataSimplified,np.ravel(trainLabel))
    predictTestLabel1 = clf1.predict(testDataSimplified)
    show_accuracy(predictTestLabel1,testLabel)
    print()
    
    print("SVC")
    clf3 = SVC(C=8.0)
    clf3.fit(trainDataSimplified,np.ravel(trainLabel))
    predictTestLabel3 = clf3.predict(testDataSimplified)
    show_accuracy(predictTestLabel3,testLabel)
    print()
    
    print("LogisticRegression")
    clf4 = LogisticRegression()
    clf4.fit(trainDataSimplified,np.ravel(trainLabel))
    predictTestLabel4 = clf4.predict(testDataSimplified)
    show_accuracy(predictTestLabel4,testLabel)
    print()
    print("=====================================") 
Author: LiangjunFeng | Project: Machine-Learning | Lines: 25 | Source: A10.SFA.py

Example 9: __init__

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def __init__(self, distributions, weights=None, **kwargs):
        self.models = []
        for dist in distributions:
            dist = NaiveBayesianDistribution.from_string(dist)
            if dist is NaiveBayesianDistribution.GAUSSIAN:
                model = nb.GaussianNB(**kwargs)
            elif dist is NaiveBayesianDistribution.MULTINOMIAL:
                model = nb.MultinomialNB(**kwargs)
            elif dist is NaiveBayesianDistribution.BERNOULLI:
                model = nb.BernoulliNB(**kwargs)
            else:
                raise ValueError('Unknown distribution: {}.'.format(dist))
            kwargs['fit_prior'] = False  # Except the first model.
            self.models.append(model)

        self.weights = weights 
Author: vacancy | Project: Jacinle | Lines: 18 | Source: hybrid_nb.py

Example 10: test_smoke

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def test_smoke():
    a = nb.GaussianNB()
    b = nb_.GaussianNB()
    a.fit(X, y)
    X_ = X.compute()
    y_ = y.compute()
    b.fit(X_, y_)

    assert_eq(a.class_prior_.compute(), b.class_prior_)
    assert_eq(a.class_count_.compute(), b.class_count_)
    assert_eq(a.theta_.compute(), b.theta_)
    assert_eq(a.sigma_.compute(), b.sigma_)

    assert_eq(a.predict_proba(X).compute(), b.predict_proba(X_))
    assert_eq(a.predict(X).compute(), b.predict(X_))
    assert_eq(a.predict_log_proba(X).compute(), b.predict_log_proba(X_)) 
Author: dask | Project: dask-ml | Lines: 18 | Source: test_naive_bayes.py

Example 11: define_clfs_params

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def define_clfs_params(self):
        '''
        Defines all relevant parameters and classes for classifier objects.
        Edit these if you wish to change parameters.
        '''
        # These are the classifiers
        self.clfs = {
            'RF': RandomForestClassifier(n_estimators = 50, n_jobs = -1),
            'ET': ExtraTreesClassifier(n_estimators = 10, n_jobs = -1, criterion = 'entropy'),
            'AB': AdaBoostClassifier(DecisionTreeClassifier(max_depth = 1), algorithm = "SAMME", n_estimators = 200),
            'LR': LogisticRegression(penalty = 'l1', C = 1e5),
            'SVM': svm.SVC(kernel = 'linear', probability = True, random_state = 0),
            'GB': GradientBoostingClassifier(learning_rate = 0.05, subsample = 0.5, max_depth = 6, n_estimators = 10),
            'NB': GaussianNB(),
            'DT': DecisionTreeClassifier(),
            'SGD': SGDClassifier(loss = 'log', penalty = 'l2'),
            'KNN': KNeighborsClassifier(n_neighbors = 3)
            }
        # These are the parameters which will be run through
        self.params = {
             'RF':{'n_estimators': [1,10,100,1000], 'max_depth': [10, 15,20,30,40,50,60,70,100], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'LR': {'penalty': ['l1','l2'], 'C': [0.00001,0.0001,0.001,0.01,0.1,1,10], 'random_state': [1]},
             'SGD': {'loss': ['log'], 'penalty': ['l2','l1','elasticnet'], 'random_state': [1]},
             'ET': {'n_estimators': [1,10,100,1000], 'criterion' : ['gini', 'entropy'], 'max_depth': [1,3,5,10,15], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'AB': {'algorithm': ['SAMME', 'SAMME.R'], 'n_estimators': [1,10,100,1000], 'random_state': [1]},
             'GB': {'n_estimators': [1,10,100,1000], 'learning_rate' : [0.001,0.01,0.05,0.1,0.5],'subsample' : [0.1,0.5,1.0], 'max_depth': [1,3,5,10,20,50,100], 'random_state': [1]},
             'NB': {},
             'DT': {'criterion': ['gini', 'entropy'], 'max_depth': [1,2,15,20,30,40,50], 'max_features': ['sqrt','log2'],'min_samples_split': [2,5,10], 'random_state': [1]},
             'SVM' :{'C' :[0.00001,0.0001,0.001,0.01,0.1,1,10],'kernel':['linear'], 'random_state': [1]},
             'KNN' :{'n_neighbors': [1,5,10,25,50,100],'weights': ['uniform','distance'],'algorithm': ['auto','ball_tree','kd_tree']}
             } 
Author: aldengolab | Project: fake-news-detection | Lines: 33 | Source: model_loop.py

Example 12: naive_bayes_classifier

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def naive_bayes_classifier(x_train, y, x_predict):
    gnb = GaussianNB()
    gnb.fit(x_train, y)
    prediction = gnb.predict(x_predict)
    return prediction 
Author: 5hirish | Project: adam_qas | Lines: 7 | Source: question_classifier.py

Example 13: naive_bayes_classifier

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def naive_bayes_classifier(df_question_train, df_question_class):
    gnb = GaussianNB()
    gnb.fit(df_question_train, df_question_class)
    logger.info("Gaussian Naive Bayes: {0}".format(gnb))

    return gnb 
Author: 5hirish | Project: adam_qas | Lines: 8 | Source: question_classifier_trainer.py

Example 14: script_run

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def script_run():
    # Generate the keyword list from the training corpus.
    kw_list = build_key_word("train.txt")
    # Save the keywords to disk.
    fp = open("new_word.txt", encoding="utf-8", mode="w")
    for word in kw_list:
        fp.write(word + "\n")
    fp.close()
    # kw_list = load_key_words("word.txt")
    feature, label = get_feature("train.txt", kw_list)
    gnb = GaussianNB()
    gnb = gnb.fit(feature, label)
    joblib.dump(gnb, 'model/gnb.model')
    print("训练完成")
    # print(feature,label) 
Author: MashiMaroLjc | Project: dudulu | Lines: 17 | Source: tool.py

Example 15: NB

# Required import: from sklearn import naive_bayes [as alias]
# Or: from sklearn.naive_bayes import GaussianNB [as alias]
def NB():
    loader = MnistLoader(flatten=True, data_path='../data', var_per=None)
    model = GaussianNB()

    model.fit(loader.data_train, loader.label_train)
    print('model trained')
    res = model.score(loader.data_test, loader.label_test)
    print(res)

    return res 
Author: cxy1997 | Project: MNIST-baselines | Lines: 12 | Source: NaiveBayes.py


Note: The sklearn.naive_bayes.GaussianNB examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.