本文整理汇总了Python中sklearn.datasets.load_iris方法的典型用法代码示例。如果您正苦于以下问题:Python datasets.load_iris方法的具体用法?Python datasets.load_iris怎么用?Python datasets.load_iris使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sklearn.datasets的用法示例。
在下文中一共展示了datasets.load_iris方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_different_results
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_different_results(self):
    """Two DP fits on the same split must disagree with each other and
    with the non-private sklearn baseline (the DP noise is randomised)."""
    from sklearn import datasets
    from sklearn import linear_model
    from sklearn.model_selection import train_test_split

    dataset = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.2)

    dp_first = LogisticRegression(data_norm=12)
    dp_first.fit(X_train, y_train)
    predict1 = dp_first.predict(X_test)

    dp_second = LogisticRegression(data_norm=12)
    dp_second.fit(X_train, y_train)
    predict2 = dp_second.predict(X_test)

    baseline = linear_model.LogisticRegression(solver="lbfgs", multi_class="ovr")
    baseline.fit(X_train, y_train)
    predict3 = baseline.predict(X_test)

    # Fresh noise each fit: repeated DP runs should differ somewhere.
    self.assertFalse(np.all(predict1 == predict2))
    # The deterministic baseline cannot match both DP runs exactly.
    self.assertFalse(np.all(predict3 == predict1) and np.all(predict3 == predict2))
示例2: test_same_results
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_same_results(self):
    """With an infinite privacy budget no noise is added, so the DP model
    should reproduce sklearn's predictions exactly."""
    from sklearn import datasets
    from sklearn.model_selection import train_test_split
    from sklearn import linear_model

    dataset = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.2)

    # epsilon = inf disables the differential-privacy noise entirely.
    dp_clf = LogisticRegression(data_norm=12, epsilon=float("inf"))
    dp_clf.fit(X_train, y_train)
    dp_predictions = dp_clf.predict(X_test)

    sk_clf = linear_model.LogisticRegression(solver="lbfgs", multi_class="ovr")
    sk_clf.fit(X_train, y_train)
    sk_predictions = sk_clf.predict(X_test)

    self.assertTrue(np.all(dp_predictions == sk_predictions))
示例3: test_different_results
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_different_results(self):
    """Two DP linear-regression fits should disagree with each other and
    with ordinary least squares, because DP noise is re-drawn per fit."""
    from sklearn import datasets
    from sklearn import linear_model
    from sklearn.model_selection import train_test_split

    dataset = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.2)

    # Per-feature bounds for the iris data plus target bounds.
    feature_bounds = ([4.3, 2.0, 1.1, 0.1], [7.9, 4.4, 6.9, 2.5])

    dp_first = LinearRegression(data_norm=12, bounds_X=feature_bounds,
                                bounds_y=(0, 2))
    dp_first.fit(X_train, y_train)
    predict1 = dp_first.predict(X_test)

    dp_second = LinearRegression(data_norm=12, bounds_X=feature_bounds,
                                 bounds_y=(0, 2))
    dp_second.fit(X_train, y_train)
    predict2 = dp_second.predict(X_test)

    baseline = linear_model.LinearRegression()
    baseline.fit(X_train, y_train)
    predict3 = baseline.predict(X_test)

    self.assertFalse(np.all(predict1 == predict2))
    self.assertFalse(np.all(predict3 == predict1) and np.all(predict3 == predict2))
示例4: test_same_results
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_same_results(self):
    """With an infinite privacy budget the DP linear regression should
    match plain sklearn least squares to numerical precision."""
    from sklearn import datasets
    from sklearn.model_selection import train_test_split
    from sklearn import linear_model

    dataset = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.2)

    clf = LinearRegression(data_norm=12, epsilon=float("inf"),
                           bounds_X=([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5]),
                           bounds_y=(0, 2))
    clf.fit(X_train, y_train)
    predict1 = clf.predict(X_test)

    # FIX: the original passed `normalize=False`.  That parameter was
    # deprecated in scikit-learn 1.0 and removed in 1.2, so the call now
    # raises TypeError; False was the default anyway, so omitting it is
    # behaviour-identical on every sklearn version.
    clf = linear_model.LinearRegression()
    clf.fit(X_train, y_train)
    predict2 = clf.predict(X_test)

    # allclose, not exact equality: two different solvers over floats.
    self.assertTrue(np.allclose(predict1, predict2))
示例5: test_different_results
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_different_results(self):
    """DP Gaussian naive Bayes predictions should differ somewhere from
    the non-private sklearn GaussianNB on the same split."""
    from sklearn.naive_bayes import GaussianNB as sk_nb
    from sklearn import datasets

    global_seed(12345)
    dataset = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=.2)

    # Per-feature (lower, upper) bounds for the iris attributes.
    feature_bounds = ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5])

    clf_dp = GaussianNB(epsilon=1.0, bounds=feature_bounds)
    clf_non_private = sk_nb()
    clf_dp.fit(x_train, y_train)
    clf_non_private.fit(x_train, y_train)

    agreement = clf_dp.predict(x_test) == clf_non_private.predict(x_test)
    # The DP noise should flip at least one prediction.
    self.assertFalse(np.all(agreement))
示例6: test_with_iris
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_with_iris(self):
    """DP Gaussian NB should beat chance on iris, and a partial_fit on the
    same data should double the per-class sample counts."""
    global_seed(12345)
    from sklearn import datasets

    dataset = datasets.load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=.2)

    bounds = ([4.3, 2.0, 1.0, 0.1], [7.9, 4.4, 6.9, 2.5])
    clf = GaussianNB(epsilon=5.0, bounds=bounds)
    clf.fit(x_train, y_train)

    # Copy before partial_fit mutates the counts in place.
    counts_after_fit = clf.class_count_.copy()
    self.assertGreater(clf.score(x_test, y_test), 0.5)

    clf.partial_fit(x_train, y_train)
    self.assertEqual(np.sum(clf.class_count_), np.sum(counts_after_fit) * 2)
示例7: setUp
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def setUp(self):
    """Train a tiny Keras network on iris and convert it to PMML, storing
    the fixtures the individual tests inspect."""
    iris = load_iris()
    theano.config.floatX = 'float32'  # keep weights in 32-bit floats

    inputs = iris.data.astype(theano.config.floatX)
    targets = iris.target.astype(np.int32)
    targets_ohe = np_utils.to_categorical(targets)

    # Minimal 4 -> 5 -> 3 dense net (old Keras 1.x keyword arguments).
    model = Sequential()
    model.add(Dense(input_dim=inputs.shape[1], output_dim=5, activation='tanh'))
    model.add(Dense(input_dim=5, output_dim=targets_ohe.shape[1], activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')
    model.fit(inputs, targets_ohe, nb_epoch=10, batch_size=1, verbose=3,
              validation_data=None)

    self.model = model
    self.pmml = keras2pmml(self.model,
                           copyright='Václav Čadek', model_name='Iris Model')
    self.num_inputs = self.model.input_shape[1]
    self.num_outputs = self.model.output_shape[1]
    self.num_connection_layers = len(self.model.layers)
    self.features = ['x{}'.format(i) for i in range(self.num_inputs)]
    self.class_values = ['y{}'.format(i) for i in range(self.num_outputs)]
示例8: test_bagged_imputer_classification
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_bagged_imputer_classification():
    """BaggedCategoricalImputer must fill every missing 'species' value,
    both with the default estimator and with a RandomForestClassifier."""
    iris = load_iris()

    # Build a DataFrame and append the target as a 'species' column.
    X = pd.DataFrame.from_records(data=iris.data, columns=iris.feature_names)
    X['species'] = iris.target

    # Shuffle rows so the injected nulls are not clustered by class.
    X = shuffle_dataframe(X)

    # Null out ~15% of the species labels at random.
    rands = np.random.rand(X.shape[0])
    mask = rands > 0.85
    # FIX: the original used chained assignment
    # (X['species'].iloc[mask] = np.nan), which writes into a possibly
    # temporary Series -- it raises SettingWithCopyWarning and is a silent
    # no-op under pandas copy-on-write.  Assign through .loc on the frame.
    X.loc[mask, 'species'] = np.nan

    # Default estimator: no nulls may remain after imputation.
    imputer = BaggedCategoricalImputer(cols=['species'])
    y = imputer.fit_transform(X)
    assert y['species'].isnull().sum() == 0, 'expected no null...'

    # Same contract must hold with a different base estimator.
    imputer = BaggedCategoricalImputer(cols=['species'], base_estimator=RandomForestClassifier())
    y = imputer.fit_transform(X)
    assert y['species'].isnull().sum() == 0, 'expected no null...'
示例9: test_few_classification
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_few_classification():
    """test_few.py: tests default classification settings"""
    np.random.seed(42)
    X, y = load_iris(return_X_y=True)
    train, test = train_test_split(np.arange(X.shape[0]), train_size=0.75,
                                   test_size=0.25)

    # Default (continuous-output) configuration.
    learner = FEW(classification=True, population_size='1x', generations=10)
    learner.fit(X[train], y[train])
    print('train score:', learner.score(X[train], y[train]))
    print('test score:', learner.score(X[test], y[test]))

    # Boolean-output configuration, without seeding the population from ML.
    learner = FEW(classification=True, otype='b', population_size='2x',
                  seed_with_ml=False, generations=10)
    np.random.seed(42)
    learner.fit(X[train], y[train])
    print('train score:', learner.score(X[train], y[train]))
    print('test score:', learner.score(X[test], y[test]))
    learner.print_model()
示例10: main
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def main():
    """Feed iris through a FeatureUnion of single-column selectors and
    print the mean of each selected column."""
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])

    columns = ["sepal length (cm)", "sepal width (cm)"]
    pipeline = FeatureUnion([
        # Bind `col` as a default argument so each lambda keeps its column.
        (str(i), make_pipeline(
            FunctionTransformer(lambda X, col=col: X.loc[:, [col]]),
            # other transformations
        ))
        for i, col in enumerate(columns, start=1)
    ])

    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean())
示例11: main
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def main():
    """Feed iris through a PandasFeatureUnion of single-column selectors
    and print the mean of each selected column."""
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])
    data.loc[:, "class"] = raw_data["target"]

    columns = ["sepal length (cm)", "sepal width (cm)"]
    pipeline = PandasFeatureUnion([
        # Bind `col` as a default argument so each lambda keeps its column.
        (str(i), make_pipeline(
            PandasTransform(lambda X, col=col: X.loc[:, [col]]),
            # other transformations
        ))
        for i, col in enumerate(columns, start=1)
    ])

    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean())
示例12: main
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def main():
    """Feed iris through a plain FeatureUnion of PandasTransform selectors
    and print the mean of each selected column."""
    raw_data = load_iris()
    data = pd.DataFrame(raw_data["data"], columns=raw_data["feature_names"])
    data.loc[:, "class"] = raw_data["target"]

    columns = ["sepal length (cm)", "sepal width (cm)"]
    pipeline = FeatureUnion([
        # Bind `col` as a default argument so each lambda keeps its column.
        (str(i), make_pipeline(
            PandasTransform(lambda X, col=col: X.loc[:, [col]]),
            # other transformations
        ))
        for i, col in enumerate(columns, start=1)
    ])

    X = pipeline.fit_transform(data)
    print(X["sepal length (cm)"].mean())
    print(X["sepal width (cm)"].mean())
示例13: test_build_meowa_factory
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_build_meowa_factory():
    """10-fold CV accuracy of a MEOWA fuzzy pattern classifier on iris
    must exceed 0.80."""
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.model_selection import cross_val_score

    iris = datasets.load_iris()
    # Scale features into [0, 1] before building fuzzy memberships.
    X = MinMaxScaler().fit_transform(iris.data)
    y = iris.target

    classifier = nfpc.FuzzyPatternClassifier(membership_factory=t_factory,
                                             aggregation_factory=nfpc.MEOWAFactory())
    scores = cross_val_score(classifier, X, y, cv=10)
    assert 0.80 < np.mean(scores)
示例14: test_build_ps_owa_factory
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_build_ps_owa_factory():
    """10-fold CV accuracy of a GAOWA classifier (ps-owa optimiser) on
    iris must exceed 0.92."""
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.model_selection import cross_val_score

    iris = datasets.load_iris()
    # Scale features into [0, 1] before building fuzzy memberships.
    X = MinMaxScaler().fit_transform(iris.data)
    y = iris.target

    classifier = nfpc.FuzzyPatternClassifier(
        membership_factory=t_factory,
        aggregation_factory=nfpc.GAOWAFactory(optimizer=nfpc.ps_owa_optimizer()),
    )
    scores = cross_val_score(classifier, X, y, cv=10)
    mean = np.mean(scores)
    print("mean", mean)
    assert 0.92 < mean
示例15: test_classifier_iris
# 需要导入模块: from sklearn import datasets [as 别名]
# 或者: from sklearn.datasets import load_iris [as 别名]
def test_classifier_iris():
    """GA-trained fuzzy pattern classifier: 10-fold CV on iris should
    produce 10 scores with mean approximately 0.92."""
    from sklearn.preprocessing import MinMaxScaler
    from sklearn.model_selection import cross_val_score

    iris = load_iris()
    # Scale features into [0, 1] before building fuzzy memberships.
    X = MinMaxScaler().fit_transform(iris.data)
    y = iris.target

    classifier = fpcga.FuzzyPatternClassifierGA(iterations=100, random_state=1)
    scores = cross_val_score(classifier, X, y, cv=10)

    assert len(scores) == 10
    mean = np.mean(scores)
    assert mean > 0.6
    print("mean", mean)
    # Fixed random_state makes the exact mean reproducible.
    assert 0.92 == pytest.approx(mean, 0.01)