

Python datasets.load_iris Function Code Examples

This article collects typical usage examples of the sklearn.datasets.load_iris function in Python. If you are looking for concrete guidance on how to call load_iris, how it behaves, or what real-world uses look like, the curated code samples below should help.


The following shows 15 code examples of the load_iris function, ordered by popularity by default.
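
Before the collected examples, here is a minimal sketch of the call patterns that recur throughout them: reading attributes of the Bunch object returned by load_iris, and the return_X_y shortcut (exercised in Example 6 below). The variable names are illustrative only.

from sklearn.datasets import load_iris

# load_iris() returns a Bunch object holding the data plus some metadata
iris = load_iris()
print(iris.data.shape)      # (150, 4) feature matrix
print(iris.target.shape)    # (150,) integer class labels 0, 1, 2
print(iris.target_names)    # ['setosa' 'versicolor' 'virginica']
print(iris.feature_names)   # names of the four measurement columns

# return_X_y=True bypasses the Bunch and returns the (data, target) pair directly
X, y = load_iris(return_X_y=True)

Examples 7 and 10 read the same attributes; the remaining examples feed the loaded data into various estimators and tests.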

Example 1: test_LabelBinarizer2

    def test_LabelBinarizer2(self):
        arr = np.array(['X', 'Y', 'Z', 'X'])
        s = pdml.ModelSeries(arr)

        lb = s.preprocessing.LabelBinarizer()
        s.fit(lb)

        binarized = s.transform(lb)
        self.assertTrue(isinstance(binarized, pdml.ModelFrame))

        expected = pd.DataFrame({0: [1, 0, 0, 1], 1: [0, 1, 0, 0], 2: [0, 0, 1, 0]})
        self.assert_frame_equal(binarized, expected)

        df = pdml.ModelFrame(datasets.load_iris())
        df.target.fit(lb)
        binarized = df.target.transform(lb)

        expected = pd.DataFrame({0: [1] * 50 + [0] * 100,
                                 1: [0] * 50 + [1] * 50 + [0] * 50,
                                 2: [0] * 100 + [1] * 50})
        self.assert_frame_equal(binarized, expected)

        df = pdml.ModelFrame(datasets.load_iris())
        df.target.fit(lb)
        df.target = df.target.transform(lb)
        self.assertEqual(df.shape, (150, 7))
        self.assert_frame_equal(df.target, expected)
Author: Sandy4321, Project: pandas-ml, Lines: 27, Source: test_preprocessing.py

Example 2: main

def main():
    all_targets = load_iris()['target']
    data_set = load_iris()['data']

    train_set, test_set, targets, targets_test = train_test_split(data_set, all_targets, train_size=0.9)

    targets_class = (transform_target_vars(targets, class_num=0),
                     transform_target_vars(targets, class_num=1),
                     transform_target_vars(targets, class_num=2))

    for n_trees in range(1, 150, 10):
        classifiers = (GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3),
                       GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3),
                       GradientBoostingClassifier(n_trees=n_trees, max_tree_depth=1, n_features=3))

        classifiers[0].fit(train_set, targets_class[0])
        classifiers[1].fit(train_set, targets_class[1])
        classifiers[2].fit(train_set, targets_class[2])

        predicts = (classifiers[0].predict(test_set),
                    classifiers[1].predict(test_set),
                    classifiers[2].predict(test_set))

        fin_predict = decision_function(predicts[0], predicts[1], predicts[2])

        print "Number of trees:", n_trees, ":", accuracy_score(targets_test, fin_predict)
Author: antongoy, Project: sfera_dm, Lines: 26, Source: gradient_boosting.py

Example 3: setUp

    def setUp(self):
        self.x = datasets.load_iris().data
        self.y = datasets.load_iris().target
        # test without pretraining
        self.model = dbn([nn.layer(4, linear, dlinear),
                          nn.layer(5, tanh, dtanh),
                          nn.layer(1, linear, dlinear, bias=False)], False)
Author: arider, Project: riderml, Lines: 7, Source: test_dbn.py

Example 4: main

def main():
    data_set = load_iris()['data']
    target_set = load_iris()['target']

    cartTree = CartTree(min_leaf_size=5)

    cartTree.fit(data_set, target_set)

    print(cartTree.tree)
    print(target_set)
    print(numpy.array([int(round(cartTree.predict([x]))) for x in data_set]))
Author: shayakhmetov, Project: sfera-DataMining, Lines: 11, Source: CART.py

Example 5: createDataSet

def createDataSet():
    dataSet = datasets.load_iris()
    iris_X = dataSet.data
    iris_y = dataSet.target
    np.random.seed(1)
    indices = np.random.permutation(len(iris_X))
    iris_X_train = iris_X[indices[:-10]]
    iris_y_train = iris_y[indices[:-10]]
    iris_X_test  = iris_X[indices[-10:]]
    iris_y_test  = iris_y[indices[-10:]]
    return iris_X_train, iris_y_train, iris_X_test, iris_y_test
Author: LeonKennedy, Project: LearningByLanguage, Lines: 12, Source: decisionTress.py

Example 6: test_load_iris

def test_load_iris():
    res = load_iris()
    assert_equal(res.data.shape, (150, 4))
    assert_equal(res.target.size, 150)
    assert_equal(res.target_names.size, 3)
    assert_true(res.DESCR)

    # test return_X_y option
    X_y_tuple = load_iris(return_X_y=True)
    bunch = load_iris()
    assert_true(isinstance(X_y_tuple, tuple))
    assert_array_equal(X_y_tuple[0], bunch.data)
    assert_array_equal(X_y_tuple[1], bunch.target)
Author: NazBen, Project: scikit-learn, Lines: 13, Source: test_base.py

Example 7: load_iris_data

def load_iris_data():

    # load the iris dataset from the sklearn module
    iris = datasets.load_iris()

    # extract the elements of the data that are used in this exercise
    return (iris.data, iris.target, iris.target_names)
Author: ruslan-d, Project: GA_Homework, Lines: 7, Source: hw1.py

Example 8: testIris_proba

    def testIris_proba(self):
        random.seed(42)
        iris = datasets.load_iris()
        classifier = skflow.TensorFlowClassifier(n_classes=3)
        classifier.fit(iris.data, iris.target)
        score = log_loss(iris.target, classifier.predict_proba(iris.data))
        self.assertLess(score, 0.8, "Failed with score = {0}".format(score))
Author: hellios78, Project: skflow, Lines: 7, Source: test_base.py

Example 9: testIrisSummaries

    def testIrisSummaries(self):
        random.seed(42)
        iris = datasets.load_iris()
        classifier = skflow.TensorFlowLinearClassifier(n_classes=3)
        classifier.fit(iris.data, iris.target, logdir='/tmp/skflow_tests/')
        score = accuracy_score(classifier.predict(iris.data), iris.target)
        self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
Author: hellios78, Project: skflow, Lines: 7, Source: test_base.py

Example 10: main

def main():

    # http://scikit-learn.org/stable/tutorial/basic/tutorial.html#loading-an-example-dataset
    # "A dataset is a dictionary-like object that holds all the data and some
    # metadata about the data. This data is stored in the .data member, which
    # is a n_samples, n_features array. In the case of supervised problem, one
    # or more response variables are stored in the .target member."

    # Toy datasets

    iris = datasets.load_iris()         # The iris dataset (classification)
    digits = datasets.load_digits()     # The digits dataset (classification)

    #boston = datasets.load_boston()     # The boston house-prices dataset (regression)
    #diabetes = datasets.load_diabetes() # The diabetes dataset (regression)
    #linnerud = datasets.load_linnerud() # The linnerud dataset (multivariate regression)

    print(iris.feature_names)
    print(iris.data)
    print(iris.target_names)
    print(iris.target)

    print(digits.images[0])
    print(digits.target_names)
    print(digits.target)

    plt.imshow(digits.images[0], cmap='gray', interpolation='nearest')
    plt.show()
Author: jeremiedecock, Project: snippets, Lines: 28, Source: datasets.py

Example 11: test_sparse_fit_params

def test_sparse_fit_params():
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
Author: AppliedArtificialIntelligence, Project: scikit-learn, Lines: 7, Source: test_cross_validation.py

Example 12: test_score_memmap

def test_score_memmap():
    # Ensure a scalar score of memmap type is accepted
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
    tf.write(b'Hello world!!!!!')
    tf.close()
    scores = np.memmap(tf.name, dtype=np.float64)
    score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
    try:
        cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
        # non-scalar should still fail
        assert_raises(ValueError, cross_val_score, clf, X, y,
                      scoring=lambda est, X, y: scores)
    finally:
        # Best effort to release the mmap file handles before deleting the
        # backing file under Windows
        scores, score = None, None
        for _ in range(3):
            try:
                os.unlink(tf.name)
                break
            except WindowsError:
                sleep(1.)
Author: YinongLong, Project: scikit-learn, Lines: 25, Source: test_validation.py

Example 13: test_classification_report_multiclass_with_digits

def test_classification_report_multiclass_with_digits():
    """Test performance report with added digits in floating point values"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa    0.82609   0.79167   0.80851        24
 versicolor    0.33333   0.09677   0.15000        31
  virginica    0.41860   0.90000   0.57143        20

avg / total    0.51375   0.53333   0.47310        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names, digits=5)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Author: nateyoder, Project: scikit-learn, Lines: 32, Source: test_classification.py

Example 14: test_classification_report_multiclass

def test_classification_report_multiclass():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa       0.83      0.79      0.81        24
 versicolor       0.33      0.10      0.15        31
  virginica       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(
        y_true, y_pred, labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.83      0.79      0.81        24
          1       0.33      0.10      0.15        31
          2       0.42      0.90      0.57        20

avg / total       0.51      0.53      0.47        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Author: nateyoder, Project: scikit-learn, Lines: 32, Source: test_classification.py

Example 15: test_classification_report

def test_classification_report():
    """Test performance report"""
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)

    # print classification report with class names
    expected_report = """\
             precision    recall  f1-score   support

     setosa       0.82      0.92      0.87        25
 versicolor       0.56      0.17      0.26        30
  virginica       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(
        y_true, y_pred, labels=range(len(iris.target_names)),
        target_names=iris.target_names)
    assert_equal(report, expected_report)

    # print classification report with label detection
    expected_report = """\
             precision    recall  f1-score   support

          0       0.82      0.92      0.87        25
          1       0.56      0.17      0.26        30
          2       0.47      0.90      0.62        20

avg / total       0.62      0.61      0.56        75
"""
    report = classification_report(y_true, y_pred)
    assert_equal(report, expected_report)
Author: conradlee, Project: scikit-learn, Lines: 32, Source: test_metrics.py


Note: The sklearn.datasets.load_iris function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not reproduce this article without permission.