

Python datasets.load_iris Method Code Examples

This article collects typical usage examples of the sklearn.datasets.load_iris method in Python. If you are wondering how to call datasets.load_iris, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples for the containing module, sklearn.datasets.


The following presents 15 code examples of the datasets.load_iris method, sorted by popularity by default.
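
For orientation, here is a minimal standalone sketch of what load_iris itself returns; it uses only standard scikit-learn API and is not taken from any of the projects below.

from sklearn.datasets import load_iris

# Bunch object exposing data, target, feature_names and target_names
iris = load_iris()
print(iris.data.shape)       # (150, 4)
print(iris.feature_names)    # ['sepal length (cm)', 'sepal width (cm)', ...]

# Or unpack directly into a feature matrix and a label vector
X, y = load_iris(return_X_y=True)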

Example 1: test_different_results

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_different_results(self):
        from sklearn import datasets
        from sklearn import linear_model
        from sklearn.model_selection import train_test_split

        dataset = datasets.load_iris()
        X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)

        clf = LogisticRegression(data_norm=12)
        clf.fit(X_train, y_train)

        predict1 = clf.predict(X_test)

        clf = LogisticRegression(data_norm=12)
        clf.fit(X_train, y_train)

        predict2 = clf.predict(X_test)

        clf = linear_model.LogisticRegression(solver="lbfgs", multi_class="ovr")
        clf.fit(X_train, y_train)

        predict3 = clf.predict(X_test)

        self.assertFalse(np.all(predict1 == predict2))
        self.assertFalse(np.all(predict3 == predict1) and np.all(predict3 == predict2)) 
Author: IBM | Project: differential-privacy-library | Lines of code: 27 | Source: test_LogisticRegression.py
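
Examples 1 through 6 are test methods extracted from IBM's differential-privacy-library, so names such as np, global_seed, LogisticRegression, LinearRegression and GaussianNB are imported at module level in the original test files. A minimal sketch of those assumed module-level imports (paths inferred from the diffprivlib package layout, not copied from the original tests):

import numpy as np
from sklearn.model_selection import train_test_split

# Differentially private estimators from the diffprivlib package (assumed import paths)
from diffprivlib.models import GaussianNB, LinearRegression, LogisticRegression
from diffprivlib.utils import global_seed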

Example 2: test_same_results

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_same_results(self):
        from sklearn import datasets
        from sklearn.model_selection import train_test_split
        from sklearn import linear_model

        dataset = datasets.load_iris()
        X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)

        clf = LogisticRegression(data_norm=12, epsilon=float("inf"))
        clf.fit(X_train, y_train)

        predict1 = clf.predict(X_test)

        clf = linear_model.LogisticRegression(solver="lbfgs", multi_class="ovr")
        clf.fit(X_train, y_train)

        predict2 = clf.predict(X_test)

        self.assertTrue(np.all(predict1 == predict2)) 
Author: IBM | Project: differential-privacy-library | Lines of code: 21 | Source: test_LogisticRegression.py

Example 3: test_different_results

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_different_results(self):
        from sklearn import datasets
        from sklearn import linear_model
        from sklearn.model_selection import train_test_split

        dataset = datasets.load_iris()
        X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)

        clf = LinearRegression(data_norm=12, bounds_X=([4.3, 2.0, 1.1, 0.1], [7.9, 4.4, 6.9, 2.5]), bounds_y=(0, 2))
        clf.fit(X_train, y_train)

        predict1 = clf.predict(X_test)

        clf = LinearRegression(data_norm=12, bounds_X=([4.3, 2.0, 1.1, 0.1], [7.9, 4.4, 6.9, 2.5]), bounds_y=(0, 2))
        clf.fit(X_train, y_train)

        predict2 = clf.predict(X_test)

        clf = linear_model.LinearRegression()
        clf.fit(X_train, y_train)

        predict3 = clf.predict(X_test)

        self.assertFalse(np.all(predict1 == predict2))
        self.assertFalse(np.all(predict3 == predict1) and np.all(predict3 == predict2)) 
Author: IBM | Project: differential-privacy-library | Lines of code: 27 | Source: test_LinearRegression.py

Example 4: test_same_results

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_same_results(self):
        from sklearn import datasets
        from sklearn.model_selection import train_test_split
        from sklearn import linear_model

        dataset = datasets.load_iris()
        X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.2)

        clf = LinearRegression(data_norm=12, epsilon=float("inf"),
                               bounds_X=([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5]), bounds_y=(0, 2))
        clf.fit(X_train, y_train)

        predict1 = clf.predict(X_test)

        # 'normalize=False' was already the default and the parameter has since been
        # removed from scikit-learn (in 1.2); on recent versions it can simply be omitted.
        clf = linear_model.LinearRegression(normalize=False)
        clf.fit(X_train, y_train)

        predict2 = clf.predict(X_test)

        self.assertTrue(np.allclose(predict1, predict2)) 
Author: IBM | Project: differential-privacy-library | Lines of code: 22 | Source: test_LinearRegression.py

Example 5: test_different_results

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_different_results(self):
        from sklearn.naive_bayes import GaussianNB as sk_nb
        from sklearn import datasets

        global_seed(12345)
        dataset = datasets.load_iris()

        x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=.2)

        bounds = ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5])

        clf_dp = GaussianNB(epsilon=1.0, bounds=bounds)
        clf_non_private = sk_nb()

        for clf in [clf_dp, clf_non_private]:
            clf.fit(x_train, y_train)

        same_prediction = clf_dp.predict(x_test) == clf_non_private.predict(x_test)

        self.assertFalse(np.all(same_prediction)) 
Author: IBM | Project: differential-privacy-library | Lines of code: 22 | Source: test_GaussianNB.py
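
The bounds tuples in Examples 5 and 6, ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5]), match the per-feature minima and maxima of the iris features. A small sketch of how such bounds can be derived from the data (whether diffprivlib expects exactly this (lower, upper) format should be checked against its documentation):

import numpy as np
from sklearn.datasets import load_iris

X, _ = load_iris(return_X_y=True)
lower = X.min(axis=0).tolist()   # [4.3, 2.0, 1.0, 0.1]
upper = X.max(axis=0).tolist()   # [7.9, 4.4, 6.9, 2.5]
bounds = (lower, upper)

Note that in a real differential-privacy setting the bounds should come from domain knowledge rather than be computed from the private data itself, since the latter leaks information about the data.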

Example 6: test_with_iris

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_with_iris(self):
        global_seed(12345)
        from sklearn import datasets
        dataset = datasets.load_iris()

        x_train, x_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=.2)

        bounds = ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5])

        clf = GaussianNB(epsilon=5.0, bounds=bounds)
        clf.fit(x_train, y_train)

        accuracy = clf.score(x_test, y_test)
        counts = clf.class_count_.copy()
        self.assertGreater(accuracy, 0.5)

        clf.partial_fit(x_train, y_train)
        new_counts = clf.class_count_
        self.assertEqual(np.sum(new_counts), np.sum(counts) * 2) 
Author: IBM | Project: differential-privacy-library | Lines of code: 21 | Source: test_GaussianNB.py

Example 7: setUp

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def setUp(self):
        iris = load_iris()

        theano.config.floatX = 'float32'
        X = iris.data.astype(theano.config.floatX)
        y = iris.target.astype(np.int32)
        y_ohe = np_utils.to_categorical(y)

        model = Sequential()
        # Keras 1.x API: output_dim and nb_epoch were renamed to units and epochs in Keras 2
        model.add(Dense(input_dim=X.shape[1], output_dim=5, activation='tanh'))
        model.add(Dense(input_dim=5, output_dim=y_ohe.shape[1], activation='sigmoid'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        model.fit(X, y_ohe, nb_epoch=10, batch_size=1, verbose=3, validation_data=None)

        params = {'copyright': 'Václav Čadek', 'model_name': 'Iris Model'}
        self.model = model
        self.pmml = keras2pmml(self.model, **params)
        self.num_inputs = self.model.input_shape[1]
        self.num_outputs = self.model.output_shape[1]
        self.num_connection_layers = len(self.model.layers)
        self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
        self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)] 
Author: vaclavcadek | Project: keras2pmml | Lines of code: 24 | Source: sequential.py

Example 8: test_bagged_imputer_classification

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_bagged_imputer_classification():
    iris = load_iris()

    # make DF, add species col
    X = pd.DataFrame.from_records(data=iris.data, columns=iris.feature_names)
    X['species'] = iris.target

    # shuffle...
    X = shuffle_dataframe(X)

    # set random indices to be null.. 15% should be good
    rands = np.random.rand(X.shape[0])
    mask = rands > 0.85
    X['species'].iloc[mask] = np.nan

    # define imputer, assert no missing
    imputer = BaggedCategoricalImputer(cols=['species'])
    y = imputer.fit_transform(X)
    assert y['species'].isnull().sum() == 0, 'expected no null...'

    # now test with a different estimator
    imputer = BaggedCategoricalImputer(cols=['species'], base_estimator=RandomForestClassifier())
    y = imputer.fit_transform(X)
    assert y['species'].isnull().sum() == 0, 'expected no null...' 
Author: tgsmith61591 | Project: skutil | Lines of code: 26 | Source: test_impute.py

Example 9: test_few_classification

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_few_classification():
    """test_few.py: tests default classification settings"""
    np.random.seed(42)
    X, y = load_iris(return_X_y=True)
    train,test = train_test_split(np.arange(X.shape[0]), train_size=0.75,
                                  test_size=0.25)
    few = FEW(classification=True,population_size='1x',generations=10)
    few.fit(X[train],y[train])

    print('train score:', few.score(X[train],y[train]))
    print('test score:', few.score(X[test],y[test]))

    # test boolean output
    few = FEW(classification=True,otype='b',population_size='2x',
              seed_with_ml=False,generations=10)
    np.random.seed(42)
    few.fit(X[train],y[train])

    print('train score:', few.score(X[train],y[train]))
    print('test score:', few.score(X[test],y[test]))
    few.print_model() 
Author: lacava | Project: few | Lines of code: 23 | Source: test_few.py

Example 10: main

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def main():
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])

    pipeline = FeatureUnion([
        ("1", make_pipeline(
            FunctionTransformer(lambda X: X.loc[:, ["sepal length (cm)"]]),
            # other transformations
        )),
        ("2", make_pipeline(
            FunctionTransformer(lambda X: X.loc[:, ["sepal width (cm)"]]),
            # other transformations
        ))
    ])

    # FeatureUnion.fit_transform returns a plain numpy array, so the name-based
    # indexing below fails; this file (1_problem.py) demonstrates the problem that
    # the PandasFeatureUnion / PandasTransform examples below solve.
    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean())
Author: marrrcin | Project: pandas-feature-union | Lines of code: 20 | Source: 1_problem.py

Example 11: main

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def main():
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])
    data.loc[:, "class"] = raw_data["target"]

    pipeline = PandasFeatureUnion([
        ("1", make_pipeline(
            PandasTransform(lambda X: X.loc[:, ["sepal length (cm)"]]),
            # other transformations
        )),
        ("2", make_pipeline(
            PandasTransform(lambda X: X.loc[:, ["sepal width (cm)"]]),
            # other transformations
        ))
    ])

    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean()) 
Author: marrrcin | Project: pandas-feature-union | Lines of code: 21 | Source: 3_feature_union_solution.py

Example 12: main

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def main():
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])
    data.loc[:, "class"] = raw_data["target"]

    pipeline = FeatureUnion([
        ("1", make_pipeline(
            PandasTransform(lambda X: X.loc[:, ["sepal length (cm)"]]),
            # other transformations
        )),
        ("2", make_pipeline(
            PandasTransform(lambda X: X.loc[:, ["sepal width (cm)"]]),
            # other transformations
        ))
    ])

    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean()) 
Author: marrrcin | Project: pandas-feature-union | Lines of code: 21 | Source: 2_transform_solution.py
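
Examples 11 and 12 rely on two helpers defined in the pandas-feature-union project: PandasTransform applies a function while keeping the DataFrame (and its column names) intact, and PandasFeatureUnion concatenates DataFrame outputs with pandas rather than numpy. A rough, assumed sketch of what such a transform wrapper can look like (not the project's actual implementation):

import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import load_iris

class PandasTransform(TransformerMixin, BaseEstimator):
    """Apply a function to a DataFrame and return its (DataFrame) result unchanged."""
    def __init__(self, fn):
        self.fn = fn

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return self.fn(X)

# tiny usage check on the iris DataFrame
raw = load_iris()
df = pd.DataFrame(raw["data"], columns=raw["feature_names"])
out = PandasTransform(lambda X: X.loc[:, ["sepal width (cm)"]]).fit_transform(df)
print(type(out))  # <class 'pandas.core.frame.DataFrame'>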

Example 13: test_build_meowa_factory

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_build_meowa_factory():

    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    from sklearn.preprocessing import MinMaxScaler
    X = MinMaxScaler().fit_transform(X)

    l = nfpc.FuzzyPatternClassifier(membership_factory=t_factory,
                                    aggregation_factory=nfpc.MEOWAFactory())

    from sklearn.model_selection import cross_val_score

    scores = cross_val_score(l, X, y, cv=10)
    mean = np.mean(scores)

    assert 0.80 < mean 
Author: sorend | Project: fylearn | Lines of code: 20 | Source: test_nfpc.py

Example 14: test_build_ps_owa_factory

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_build_ps_owa_factory():

    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    from sklearn.preprocessing import MinMaxScaler
    X = MinMaxScaler().fit_transform(X)

    l = nfpc.FuzzyPatternClassifier(
        membership_factory=t_factory,
        aggregation_factory=nfpc.GAOWAFactory(optimizer=nfpc.ps_owa_optimizer())
    )

    from sklearn.model_selection import cross_val_score

    scores = cross_val_score(l, X, y, cv=10)
    mean = np.mean(scores)

    print("mean", mean)

    assert 0.92 < mean 
Author: sorend | Project: fylearn | Lines of code: 24 | Source: test_nfpc.py

Example 15: test_classifier_iris

# Required import: from sklearn import datasets [as alias]
# Or: from sklearn.datasets import load_iris [as alias]
def test_classifier_iris():

    iris = load_iris()

    X = iris.data
    y = iris.target

    from sklearn.preprocessing import MinMaxScaler
    X = MinMaxScaler().fit_transform(X)

    l = fpcga.FuzzyPatternClassifierGA(iterations=100, random_state=1)

    from sklearn.model_selection import cross_val_score

    scores = cross_val_score(l, X, y, cv=10)

    assert len(scores) == 10
    assert np.mean(scores) > 0.6
    mean = np.mean(scores)

    print("mean", mean)

    assert 0.92 == pytest.approx(mean, 0.01) 
Author: sorend | Project: fylearn | Lines of code: 25 | Source: test_fpcga.py


Note: The sklearn.datasets.load_iris method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.