This article collects typical usage examples of the Python function tensorflow.contrib.learn.python.learn.datasets.base.load_iris. If you have been wondering what load_iris does, how to call it, or where to find real usage examples, the curated snippets below may help.
The 15 code examples that follow are drawn from real projects and are ordered by popularity.
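Before the examples, a minimal sketch of what load_iris returns may help orient you. This assumes TensorFlow 1.x, where tf.contrib is still available:

# Minimal sketch, assuming TensorFlow 1.x with tf.contrib available.
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base

iris = base.load_iris()          # a Dataset namedtuple with .data and .target
print(iris.data.shape)           # (150, 4) feature matrix
print(iris.target.shape)         # (150,) integer class labels
print(np.unique(iris.target))    # three classes: [0 1 2]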

Example 1: testAdditionalOutputs

def testAdditionalOutputs(self):
  """Tests multi-class classification using matrix data as input."""
  hparams = tensor_forest.ForestHParams(
      num_trees=1,
      max_nodes=100,
      num_classes=3,
      num_features=4,
      split_after_samples=20,
      inference_tree_paths=True)
  classifier = random_forest.CoreTensorForestEstimator(
      hparams.fill(), keys_column='keys', include_all_in_serving=True)

  iris = base.load_iris()
  data = iris.data.astype(np.float32)
  labels = iris.target.astype(np.int32)

  input_fn = numpy_io.numpy_input_fn(
      x={
          'x': data,
          'keys': np.arange(len(iris.data)).reshape(150, 1)
      },
      y=labels,
      batch_size=10,
      num_epochs=1,
      shuffle=False)
  classifier.train(input_fn=input_fn, steps=100)
  predictions = list(classifier.predict(input_fn=input_fn))

  # Check that each prediction carries the keys column, the tree paths,
  # and the prediction variance.
  for pred in predictions:
    self.assertTrue('keys' in pred)
    self.assertTrue('tree_paths' in pred)
    self.assertTrue('prediction_variance' in pred)

Example 2: testWithFeatureColumns

def testWithFeatureColumns(self):
  head_fn = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
      n_classes=3, loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
  hparams = tensor_forest.ForestHParams(
      num_trees=3,
      max_nodes=1000,
      num_classes=3,
      num_features=4,
      split_after_samples=20,
      inference_tree_paths=True)
  est = random_forest.CoreTensorForestEstimator(
      hparams.fill(),
      head=head_fn,
      feature_columns=[core_feature_column.numeric_column('x')])

  iris = base.load_iris()
  data = {'x': iris.data.astype(np.float32)}
  labels = iris.target.astype(np.int32)

  input_fn = numpy_io.numpy_input_fn(
      x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
  est.train(input_fn=input_fn, steps=100)
  res = est.evaluate(input_fn=input_fn, steps=1)
  self.assertEqual(1.0, res['accuracy'])
  self.assertAllClose(0.55144483, res['loss'])

Example 3: _input_fn

def _input_fn():
  iris = base.load_iris()
  return {
      'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
  }, constant_op.constant(iris.target, shape=[150], dtype=dtypes.int32)

Example 4: testIrisDNN

def testIrisDNN(self):
  iris = base.load_iris()
  feature_columns = [feature_column.real_valued_column("", dimension=4)]
  classifier = dnn.DNNClassifier(
      feature_columns=feature_columns,
      hidden_units=[10, 20, 10],
      n_classes=3,
      config=run_config.RunConfig(tf_random_seed=1))
  classifier.fit(iris.data, iris.target, max_steps=200)
  variable_names = classifier.get_variable_names()
  # Weight shapes follow the layer sizes: 4 -> 10 -> 20 -> 10 -> 3.
  self.assertEqual(
      classifier.get_variable_value("dnn/hiddenlayer_0/weights").shape,
      (4, 10))
  self.assertEqual(
      classifier.get_variable_value("dnn/hiddenlayer_1/weights").shape,
      (10, 20))
  self.assertEqual(
      classifier.get_variable_value("dnn/hiddenlayer_2/weights").shape,
      (20, 10))
  self.assertEqual(
      classifier.get_variable_value("dnn/logits/weights").shape, (10, 3))
  self.assertIn("dnn/hiddenlayer_0/biases", variable_names)
  self.assertIn("dnn/hiddenlayer_1/biases", variable_names)
  self.assertIn("dnn/hiddenlayer_2/biases", variable_names)
  self.assertIn("dnn/logits/biases", variable_names)

Example 5: testIrisInputFn

def testIrisInputFn(self):
  iris = base.load_iris()
  est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
  est.fit(input_fn=iris_input_fn, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  predictions = list(est.predict(x=iris.data))
  self.assertEqual(len(predictions), iris.target.shape[0])

Example 6: iris_input_multiclass_fn

def iris_input_multiclass_fn():
  iris = base.load_iris()
  return {
      'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
  }, constant_op.constant(iris.target, shape=(150, 1), dtype=dtypes.int32)

Example 7: testIrisAllDictionaryInput

def testIrisAllDictionaryInput(self):
  iris = base.load_iris()
  est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
  iris_data = {'input': iris.data}
  iris_target = {'labels': iris.target}
  est.fit(iris_data, iris_target, steps=100)
  scores = est.evaluate(
      x=iris_data,
      y=iris_target,
      metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
  predictions = list(est.predict(x=iris_data))
  predictions_class = list(est.predict(x=iris_data, outputs=['class']))
  self.assertEqual(len(predictions), iris.target.shape[0])
  classes_batch = np.array([p['class'] for p in predictions])
  self.assertAllClose(classes_batch,
                      np.array([p['class'] for p in predictions_class]))
  self.assertAllClose(
      classes_batch,
      np.argmax(np.array([p['prob'] for p in predictions]), axis=1))
  other_score = _sklearn.accuracy_score(iris.target, classes_batch)
  self.assertAllClose(other_score, scores['accuracy'])
  self.assertTrue('global_step' in scores)
  self.assertEqual(scores['global_step'], 100)

Example 8: _iris_data_input_fn

def _iris_data_input_fn():
  # Convert the three-class iris data to a binary logistic regression
  # problem by keeping only classes 0 and 1.
  iris = base.load_iris()
  ids = np.where((iris.target == 0) | (iris.target == 1))
  features = constant_op.constant(iris.data[ids], dtype=dtypes.float32)
  labels = constant_op.constant(iris.target[ids], dtype=dtypes.float32)
  labels = array_ops.reshape(labels, labels.get_shape().concatenate(1))
  return features, labels

Example 9: iris_input_fn_labels_dict

def iris_input_fn_labels_dict():
  iris = base.load_iris()
  features = array_ops.reshape(
      constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
  labels = {
      'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
  }
  return features, labels

Example 10: testIrisIteratorPlainInt

def testIrisIteratorPlainInt(self):
  iris = base.load_iris()
  est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 100)
  y_iter = (v for v in iris.target)
  est.fit(x_iter, y_iter, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  _ = six.next(est.predict(x=iris.data))['class']

Example 11: testMultiClass_NpMatrixData

def testMultiClass_NpMatrixData(self):
  """Tests multi-class classification using numpy matrix data as input."""
  iris = base.load_iris()
  train_x = iris.data
  train_y = iris.target
  classifier = debug.DebugClassifier(n_classes=3)
  classifier.fit(x=train_x, y=train_y, steps=200)
  scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
  self._assertInRange(0.0, 1.0, scores['accuracy'])

Example 12: testIrisIterator

def testIrisIterator(self):
  iris = base.load_iris()
  est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
  x_iter = itertools.islice(iris.data, 100)
  y_iter = itertools.islice(iris.target, 100)
  est.fit(x_iter, y_iter, steps=100)
  _ = est.evaluate(input_fn=iris_input_fn, steps=1)
  predictions = list(est.predict(x=iris.data))
  self.assertEqual(len(predictions), iris.target.shape[0])

Example 13: testDNNDropout0

def testDNNDropout0(self):
  # Dropout probability == 0 should behave like no dropout at all.
  iris = base.load_iris()
  feature_columns = [feature_column.real_valued_column("", dimension=4)]
  classifier = dnn.DNNClassifier(
      feature_columns=feature_columns,
      hidden_units=[10, 20, 10],
      n_classes=3,
      dropout=0.0,
      config=run_config.RunConfig(tf_random_seed=1))
  classifier.fit(iris.data, iris.target, max_steps=200)

Example 14: _get_classification_input_fns

def _get_classification_input_fns():
  iris = base.load_iris()
  data = iris.data.astype(np.float32)
  labels = iris.target.astype(np.int32)
  train_input_fn = numpy_io.numpy_input_fn(
      x=data, y=labels, batch_size=150, num_epochs=None, shuffle=False)
  # Predict on a single example.
  predict_input_fn = numpy_io.numpy_input_fn(
      x=data[:1], y=None, batch_size=1, num_epochs=1, shuffle=False)
  return train_input_fn, predict_input_fn

Example 15: MLP_iris

import numpy as np
import tensorflow as tf  # TensorFlow 1.x API
from tensorflow.contrib.learn.python.learn.datasets.base import load_iris

def MLP_iris():
    # Load the iris data and shuffle it with a fixed seed for reproducibility.
    iris = load_iris()
    np.random.seed(0)
    random_index = np.random.permutation(150)
    iris_data = iris.data[random_index]
    iris_target = iris.target[random_index]
    # One-hot encode the three class labels.
    iris_target_onehot = np.zeros((150, 3))
    iris_target_onehot[np.arange(150), iris_target] = 1
    accuracy_list = []

    # Build the computation graph: a one-hidden-layer MLP (4 -> 128 -> 3).
    x = tf.placeholder("float", shape=[None, 4], name='x')
    y_target = tf.placeholder("float", shape=[None, 3], name='y_target')
    # Small random initialization breaks the symmetry between hidden units;
    # the original all-zero weights would leave every hidden unit computing
    # the same function throughout training.
    W1 = tf.Variable(tf.random_normal([4, 128], stddev=0.1), name='W1')
    b1 = tf.Variable(tf.zeros([128]), name='b1')
    h1 = tf.sigmoid(tf.matmul(x, W1) + b1, name='h1')
    W2 = tf.Variable(tf.random_normal([128, 3], stddev=0.1), name='W2')
    b2 = tf.Variable(tf.zeros([3]), name='b2')
    y = tf.nn.softmax(tf.matmul(h1, W2) + b2, name='y')
    # The small epsilon inside the log avoids NaN when a softmax output hits 0.
    cross_entropy = -tf.reduce_sum(y_target * tf.log(y + 1e-10), name='cross_entropy')
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_target, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
    sess.run(tf.global_variables_initializer())
    # Train on the first 100 rows; hold out the remaining 50 for validation.
    for i in range(500):
        sess.run(train_step,
                 feed_dict={x: iris_data[0:100], y_target: iris_target_onehot[0:100]})
        train_accuracy = sess.run(
            accuracy, feed_dict={x: iris_data[0:100], y_target: iris_target_onehot[0:100]})
        validation_accuracy = sess.run(
            accuracy, feed_dict={x: iris_data[100:], y_target: iris_target_onehot[100:]})
        print("step %d, training accuracy: %.3f / validation accuracy: %.3f"
              % (i, train_accuracy, validation_accuracy))
        accuracy_list.append(validation_accuracy)
        # Early stopping: quit once validation accuracy no longer beats the
        # mean of the most recent half of its history by more than 0.01.
        if i >= 50:
            if validation_accuracy - np.mean(accuracy_list[int(round(len(accuracy_list) / 2)):]) <= 0.01:
                break
    sess.close()
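
A closing note: the examples above target TensorFlow 1.x, and tf.contrib, including this load_iris helper, was removed in TensorFlow 2.x. As a hedged sketch of a migration path, scikit-learn's loader exposes the same .data and .target attributes, so it can usually stand in for base.load_iris in these snippets:

# Sketch of a replacement on TensorFlow 2.x, assuming scikit-learn is installed.
from sklearn.datasets import load_iris

iris = load_iris()
print(iris.data.shape)    # (150, 4)
print(iris.target.shape)  # (150,)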