本文整理汇总了Python中tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn函数的典型用法代码示例。如果您正苦于以下问题:Python numpy_input_fn函数的具体用法?Python numpy_input_fn怎么用?Python numpy_input_fn使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了numpy_input_fn函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_numpy_input_fn
def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    batch_size = 10
    # Evenly spaced floats in [0, 2], shaped (batch_size, label_dimension).
    features = np.linspace(
        0., 2., batch_size * label_dimension,
        dtype=np.float32).reshape(batch_size, label_dimension)
    # Targets equal the inputs, so the estimator learns y = x.
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        y=features,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        y=features,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size)
示例2: testCheckpointCompatibleForClassifier
def testCheckpointCompatibleForClassifier(self):
    """Checks a plain DNNClassifier checkpoint loads into the annotated one."""
    n_classes = 2
    input_dimension = 2
    batch_size = 10
    raw = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    features = raw.reshape(batch_size, input_dimension)
    # Round the first batch_size values to integer class ids, shape (N, 1).
    labels = np.rint(raw[:batch_size]).astype(np.int64).reshape(batch_size, 1)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        y=labels,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': features}, batch_size=batch_size, shuffle=False)
    self._testCheckpointCompatibleWithNonAnnotatedEstimator(
        train_input_fn,
        predict_input_fn,
        dnn.DNNClassifier,
        dnn_with_layer_annotations.DNNClassifierWithLayerAnnotations,
        prediction_key=prediction_keys.PredictionKeys.PROBABILITIES,
        estimator_args={'n_classes': n_classes})
示例3: test_numpy_input_fn
def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    n_classes = 3
    input_dimension = 2
    batch_size = 10
    raw = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    features = raw.reshape(batch_size, input_dimension)
    # Turn the first batch_size values into labels with shape (N, 1).
    labels = np.reshape(self._as_label(raw[:batch_size]), (batch_size, 1))
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        y=labels,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        y=labels,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': features},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size)
示例4: test_numpy_input_fn
def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    batch_size = 5
    img_size = 8
    channel_size = 3
    label_size = 3
    # All-zero images: only the shapes matter for this flow test.
    images = np.zeros(
        [batch_size, img_size, img_size, channel_size], dtype=np.float32)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': images},
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': images}, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(x={'x': images}, shuffle=False)
    # Run every input_fn through the test helper before use.
    train_input_fn = self._numpy_input_fn_wrapper(
        train_input_fn, batch_size, label_size)
    eval_input_fn = self._numpy_input_fn_wrapper(
        eval_input_fn, batch_size, label_size)
    predict_input_fn = self._numpy_input_fn_wrapper(
        predict_input_fn, batch_size, label_size)
    predict_input_fn = estimator.stargan_prediction_input_fn_wrapper(
        predict_input_fn)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, img_size, img_size, channel_size])
示例5: test_numpy_input_fn_lrdecay
def test_numpy_input_fn_lrdecay(self):
    """Tests complete flow with numpy_input_fn."""
    input_dim = 4
    batch_size = 5
    # Zero-valued features/targets; the flow, not the fit, is under test.
    samples = np.zeros([batch_size, input_dim])
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        y=samples,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        y=samples,
        batch_size=batch_size,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        batch_size=batch_size,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        prediction_size=[batch_size, input_dim],
        lr_decay=True)
示例6: train_and_eval
def train_and_eval():
    """Train and evaluate the model."""
    model_dir = FLAGS.model_dir if FLAGS.model_dir else tempfile.mkdtemp()
    print('model directory = %s' % model_dir)
    est = build_estimator(model_dir)
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'images': mnist.train.images},
        y=mnist.train.labels.astype(numpy.int32),
        batch_size=FLAGS.batch_size,
        num_epochs=None,
        shuffle=True)
    est.fit(input_fn=train_input_fn, steps=None)
    metric_name = 'accuracy'
    metrics_to_run = {
        metric_name: metric_spec.MetricSpec(
            eval_metrics.get_metric(metric_name),
            prediction_key=eval_metrics.get_prediction_key(metric_name)),
    }
    # One pass over the held-out set, unshuffled.
    test_input_fn = numpy_io.numpy_input_fn(
        x={'images': mnist.test.images},
        y=mnist.test.labels.astype(numpy.int32),
        num_epochs=1,
        batch_size=FLAGS.batch_size,
        shuffle=False)
    results = est.evaluate(input_fn=test_input_fn, metrics=metrics_to_run)
    for key in sorted(results):
        print('%s: %s' % (key, results[key]))
示例7: get_resource_for_simple_model
def get_resource_for_simple_model(is_sequential, is_evaluate):
    """Builds a simple keras model plus train/inference input_fns for it."""
    if is_sequential:
        model = simple_sequential_model()
        # Sequential models need an explicit build before input_names exists.
        model.build()
    else:
        model = simple_functional_model()
    input_name = model.input_names[0]
    np.random.seed(_RANDOM_SEED)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=_TRAIN_SIZE,
        test_samples=50,
        input_shape=_INPUT_SIZE,
        num_classes=_NUM_CLASS)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)
    train_input_fn = numpy_io.numpy_input_fn(
        x={input_name: x_train},
        y=y_train,
        shuffle=False,
        num_epochs=None,
        batch_size=16)
    evaluate_input_fn = numpy_io.numpy_input_fn(
        x={input_name: x_test}, y=y_test, num_epochs=1, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={input_name: x_test}, num_epochs=1, shuffle=False)
    inference_input_fn = evaluate_input_fn if is_evaluate else predict_input_fn
    return (model, (x_train, y_train), (x_test, y_test), train_input_fn,
            inference_input_fn)
示例8: testNumpyInputFnWithNonBoolShuffle
def testNumpyInputFnWithNonBoolShuffle(self):
    """numpy_input_fn must raise when shuffle is left at its None default."""
    features = np.arange(32, 36)
    labels = np.arange(4)
    with self.test_session():
        with self.assertRaisesRegexp(
            TypeError, 'shuffle must be explicitly set as boolean'):
            # Default shuffle is None.
            numpy_io.numpy_input_fn(features, labels)
示例9: test_complete_flow_with_a_simple_linear_model
def test_complete_flow_with_a_simple_linear_model(self):
    """Trains, evaluates, predicts and exports a one-unit linear model."""

    def _model_fn(features, labels, mode):
        # Single dense unit starting from zero weights; SGD learns y = x.
        predictions = layers.dense(
            features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
        export_outputs = {
            'predictions': export_output.RegressionOutput(predictions),
        }
        if mode == model_fn_lib.ModeKeys.PREDICT:
            return model_fn_lib.EstimatorSpec(
                mode, predictions=predictions, export_outputs=export_outputs)
        loss = losses.mean_squared_error(labels, predictions)
        train_op = training.GradientDescentOptimizer(
            learning_rate=0.5).minimize(loss, training.get_global_step())
        eval_metric_ops = {
            'absolute_error': metrics_lib.mean_absolute_error(
                labels, predictions),
        }
        return model_fn_lib.EstimatorSpec(
            mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            export_outputs=export_outputs)

    est = estimator.Estimator(model_fn=_model_fn)
    samples = np.linspace(0., 1., 100, dtype=np.float32).reshape(-1, 1)
    # TRAIN: learn y = x.
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples}, y=samples, batch_size=50, num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    # EVALUATE
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples}, y=samples, batch_size=50, num_epochs=1,
        shuffle=True)
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores['global_step'])
    self.assertGreater(0.1, scores['absolute_error'])
    # PREDICT
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples}, y=None, batch_size=10, num_epochs=1, shuffle=False)
    predictions = list(est.predict(predict_input_fn))
    self.assertAllClose(samples, predictions, atol=0.01)
    # EXPORT
    feature_spec = {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)}
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(
        tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
示例10: testNumpyInputFnWithNonBoolShuffle
def testNumpyInputFnWithNonBoolShuffle(self):
    """numpy_input_fn must raise when shuffle is left at its None default."""
    features = np.arange(32, 36)
    labels = np.arange(4)
    with self.cached_session():
        with self.assertRaisesRegexp(
            ValueError,
            'shuffle must be provided and explicitly '
            'set as boolean'):
            # Default shuffle is None.
            numpy_io.numpy_input_fn(features, labels)
示例11: _get_regression_input_fns
def _get_regression_input_fns():
    """Returns (train_input_fn, predict_input_fn) over the Boston dataset."""
    boston = base.load_boston()
    features = boston.data.astype(np.float32)
    # NOTE(review): targets are cast to int32 although Boston prices are
    # continuous -- presumably intentional for this fixture; confirm
    # before reusing elsewhere.
    targets = boston.target.astype(np.int32)
    # One whole-dataset batch, repeated forever, in a fixed order.
    train_input_fn = numpy_io.numpy_input_fn(
        x=features, y=targets, batch_size=506, num_epochs=None, shuffle=False)
    # Single-example, single-pass input for prediction.
    predict_input_fn = numpy_io.numpy_input_fn(
        x=features[:1], y=None, batch_size=1, num_epochs=1, shuffle=False)
    return train_input_fn, predict_input_fn
示例12: _get_classification_input_fns
def _get_classification_input_fns():
    """Returns (train_input_fn, predict_input_fn) over the iris dataset."""
    iris = base.load_iris()
    features = iris.data.astype(np.float32)
    targets = iris.target.astype(np.int32)
    # One whole-dataset batch, repeated forever, in a fixed order.
    train_input_fn = numpy_io.numpy_input_fn(
        x=features, y=targets, batch_size=150, num_epochs=None, shuffle=False)
    # Single-example, single-pass input for prediction.
    predict_input_fn = numpy_io.numpy_input_fn(
        x=features[:1], y=None, batch_size=1, num_epochs=1, shuffle=False)
    return train_input_fn, predict_input_fn
示例13: test_complete_flow_with_mode
def test_complete_flow_with_mode(self, distribution):
    """Runs a distributed DNN+linear regressor through train/eval/export."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    samples = np.linspace(
        0., 2., batch_size * label_dimension,
        dtype=np.float32).reshape(batch_size, label_dimension)
    # Split the global batch evenly across the distribution's workers.
    train_input_fn = self.dataset_input_fn(
        x={'x': samples},
        y=samples,
        batch_size=batch_size // len(distribution.worker_devices),
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples}, y=samples, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples}, batch_size=batch_size, shuffle=False)
    linear_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,)),
    ]
    dnn_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,)),
    ]
    feature_columns = linear_feature_columns + dnn_feature_columns
    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir,
        # TODO(isaprykin): Work around the colocate_with error.
        dnn_optimizer=adagrad.AdagradOptimizer(0.001),
        linear_optimizer=adagrad.AdagradOptimizer(0.001),
        config=run_config.RunConfig(train_distribute=distribution))
    num_steps = 10
    regressor.train(train_input_fn, steps=num_steps)
    scores = regressor.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))
    predictions = np.array([
        pred[prediction_keys.PredictionKeys.PREDICTIONS]
        for pred in regressor.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = regressor.export_savedmodel(
        tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
示例14: test_complete_flow
def test_complete_flow(self):
    """Trains, evaluates, predicts and exports a DNNRegressor end to end."""
    label_dimension = 2
    batch_size = 10
    feature_columns = [feature_column.numeric_column('x', shape=(2,))]
    est = dnn.DNNRegressor(
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    samples = np.linspace(
        0., 2., batch_size * label_dimension,
        dtype=np.float32).reshape(batch_size, label_dimension)
    # TRAIN: targets equal the inputs, so the network learns y = x.
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        y=samples,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    num_steps = 200
    est.train(train_input_fn, steps=num_steps)
    # EVALUATE
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        y=samples,
        batch_size=batch_size,
        shuffle=False)
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))
    # PREDICT
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': samples},
        batch_size=batch_size,
        shuffle=False)
    predictions = np.array([
        pred[prediction_keys.PredictionKeys.PREDICTIONS]
        for pred in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)
    # TODO(ptucker): Deterministic test for predicted values?
    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(
        tempfile.mkdtemp(), serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
示例15: main
def main(*args):
    """Trains and evaluates a regressor on the Boston house-prices dataset.

    The dataset concerns housing values in Boston suburbs. It is based on
    the "Boston Housing Dataset" from the UCI repository, which in turn was
    taken from the StatLib library maintained at Carnegie Mellon University.

    See: https://archive.ics.uci.edu/ml/datasets/Housing
    """
    # Load the dataset and split it into train / test partitions.
    boston = datasets.load_boston()
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        boston.data, boston.target, test_size=0.2, random_state=42)
    # Scale training features to zero mean and unit standard deviation.
    scaler = preprocessing.StandardScaler()
    x_train = scaler.fit_transform(x_train)

    def graph_fn(mode, features):
        # Two hidden layers of 32 ReLU units, then a single linear output.
        hidden = plx.layers.FullyConnected(
            mode, num_units=32, activation='relu', dropout=0.3)(features['x'])
        hidden = plx.layers.FullyConnected(
            mode, num_units=32, activation='relu', dropout=0.3)(hidden)
        return plx.layers.FullyConnected(mode, num_units=1, dropout=0.3)(hidden)

    def model_fn(features, labels, mode):
        model = plx.models.Regressor(
            mode, graph_fn=graph_fn,
            loss_config=plx.configs.LossConfig(module='mean_squared_error'),
            optimizer_config=plx.configs.OptimizerConfig(
                module='sgd', learning_rate=0.01),
            summaries='all')
        return model(features, labels)

    estimator = plx.estimators.Estimator(
        model_fn=model_fn, model_dir="/tmp/polyaxon_logs/boston")
    estimator.train(input_fn=numpy_input_fn(
        {'x': np.asarray(x_train, dtype=np.float32)},
        np.expand_dims(y_train, axis=1),
        shuffle=False, num_epochs=5000, batch_size=64))
    # Apply the training-set scaling to the held-out features before eval.
    x_test = scaler.transform(x_test)
    estimator.evaluate(input_fn=numpy_input_fn(
        {'x': np.asarray(x_test, dtype=np.float32)},
        np.expand_dims(y_test, axis=1),
        shuffle=False, num_epochs=1, batch_size=32))