This article collects typical usage examples of the Python function tensorflow.python.estimator.export.export.build_parsing_serving_input_receiver_fn. If you are wondering what build_parsing_serving_input_receiver_fn does, how to call it, or what it looks like in real code, the curated examples below should help.
Fifteen code examples of build_parsing_serving_input_receiver_fn are shown, sorted by popularity by default.
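All of the examples follow the same basic pattern: build a feature spec describing how serialized tf.Examples should be parsed, pass it to build_parsing_serving_input_receiver_fn, and hand the resulting function to export_savedmodel. Here is a minimal sketch of that pattern, assuming TensorFlow 1.x; the estimator `est` and the feature name 'x' are illustrative placeholders, not taken from the examples below.

# Minimal usage sketch (TensorFlow 1.x; `est` and feature 'x' are placeholders).
import tempfile
import tensorflow as tf

# Describe how incoming serialized tf.Examples should be parsed at serving time.
feature_spec = {'x': tf.FixedLenFeature([1], tf.float32)}

# The returned fn builds a receiver that accepts a batch of serialized
# tf.Example strings and parses them according to feature_spec.
serving_input_receiver_fn = (
    tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))

# est = ...  # any trained tf.estimator.Estimator
# export_dir = est.export_savedmodel(tempfile.mkdtemp(),
#                                    serving_input_receiver_fn)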
Example 1: _test_complete_flow
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                        input_dimension, label_dimension, prediction_length):
  feature_columns = [
      feature_column_lib.numeric_column('x', shape=(input_dimension,))
  ]
  est = _baseline_estimator_fn(
      label_dimension=label_dimension,
      model_dir=self._model_dir)

  # TRAIN
  # learn y = x
  est.train(train_input_fn, steps=200)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

  # PREDICT
  predictions = np.array(
      [x['predictions'] for x in est.predict(predict_input_fn)])
  self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

  # EXPORT
  feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 2: _test_complete_flow_helper
def _test_complete_flow_helper(
    self, linear_feature_columns, dnn_feature_columns, feature_spec,
    train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
    label_dimension, batch_size):
  est = dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=linear_feature_columns,
      dnn_hidden_units=(2, 2),
      dnn_feature_columns=dnn_feature_columns,
      label_dimension=label_dimension,
      model_dir=self._model_dir)

  # TRAIN
  num_steps = 10
  est.train(train_input_fn, steps=num_steps)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  # PREDICT
  predictions = np.array([
      x[prediction_keys.PredictionKeys.PREDICTIONS]
      for x in est.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, label_dimension), predictions.shape)

  # EXPORT
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 3: _test_complete_flow
def _test_complete_flow(self, feature_columns, train_input_fn, eval_input_fn,
                        predict_input_fn, n_classes, batch_size):
  cell_units = [4, 2]
  est = self._create_estimator_fn(feature_columns, n_classes, cell_units,
                                  self._model_dir)

  # TRAIN
  num_steps = 10
  est.train(train_input_fn, steps=num_steps)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  # PREDICT
  predicted_proba = np.array([
      x[prediction_keys.PredictionKeys.PROBABILITIES]
      for x in est.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

  # EXPORT
  feature_spec = parsing_utils.classifier_parse_example_spec(
      feature_columns,
      label_key='label',
      label_dtype=dtypes.int64)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 4: _test_complete_flow
def _test_complete_flow(
    self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
    n_classes, batch_size):
  feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))]
  est = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=feature_columns,
      n_classes=n_classes,
      model_dir=self._model_dir)

  # TRAIN
  num_steps = 10
  est.train(train_input_fn, steps=num_steps)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  # PREDICT
  predicted_proba = np.array([
      x[prediction_keys.PredictionKeys.PROBABILITIES]
      for x in est.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

  # EXPORT
  feature_spec = feature_column.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 5: _test_complete_flow
def _test_complete_flow(
    self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
    label_dimension, batch_size):
  feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))]
  est = linear.LinearEstimator(
      head=head_lib.regression_head(label_dimension=label_dimension),
      feature_columns=feature_columns,
      model_dir=self._model_dir)

  # TRAIN
  num_steps = 10
  est.train(train_input_fn, steps=num_steps)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  # PREDICT
  predictions = np.array([
      x[prediction_keys.PredictionKeys.PREDICTIONS]
      for x in est.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, label_dimension), predictions.shape)

  # EXPORT
  feature_spec = feature_column.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 6: test_scaffold_is_used_for_saver
def test_scaffold_is_used_for_saver(self):
  tmpdir = tempfile.mkdtemp()

  def _model_fn_scaffold(features, labels, mode):
    _, _ = features, labels
    variables.Variable(1., name='weight')
    real_saver = saver.Saver()
    self.mock_saver = test.mock.Mock(
        wraps=real_saver, saver_def=real_saver.saver_def)
    scores = constant_op.constant([3.])
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=constant_op.constant([[1.]]),
        loss=constant_op.constant(0.),
        train_op=constant_op.constant(0.),
        scaffold=training.Scaffold(saver=self.mock_saver),
        export_outputs={'test': export_output.ClassificationOutput(scores)})

  est = estimator.Estimator(model_fn=_model_fn_scaffold)
  est.train(dummy_input_fn, steps=1)
  feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                  'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)

  # Perform the export.
  export_dir_base = os.path.join(
      compat.as_bytes(tmpdir), compat.as_bytes('export'))
  est.export_savedmodel(export_dir_base, serving_input_receiver_fn)
  self.assertTrue(self.mock_saver.restore.called)
Example 7: test_complete_flow_with_a_simple_linear_model
def test_complete_flow_with_a_simple_linear_model(self):

  def _model_fn(features, labels, mode):
    predictions = layers.dense(
        features['x'], 1, kernel_initializer=init_ops.zeros_initializer())
    export_outputs = {
        'predictions': export_output.RegressionOutput(predictions)
    }

    if mode == model_fn_lib.ModeKeys.PREDICT:
      return model_fn_lib.EstimatorSpec(
          mode, predictions=predictions, export_outputs=export_outputs)

    loss = losses.mean_squared_error(labels, predictions)
    train_op = training.GradientDescentOptimizer(learning_rate=0.5).minimize(
        loss, training.get_global_step())
    eval_metric_ops = {
        'absolute_error': metrics_lib.mean_absolute_error(
            labels, predictions)
    }

    return model_fn_lib.EstimatorSpec(
        mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs)

  est = estimator.Estimator(model_fn=_model_fn)
  data = np.linspace(0., 1., 100, dtype=np.float32).reshape(-1, 1)

  # TRAIN
  # learn y = x
  train_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=data, batch_size=50, num_epochs=None, shuffle=True)
  est.train(train_input_fn, steps=200)

  # EVALUATE
  eval_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=data, batch_size=50, num_epochs=1, shuffle=True)
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(200, scores['global_step'])
  self.assertGreater(0.1, scores['absolute_error'])

  # PREDICT
  predict_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=None, batch_size=10, num_epochs=1, shuffle=False)
  predictions = list(est.predict(predict_input_fn))
  self.assertAllClose(data, predictions, atol=0.01)

  # EXPORT
  feature_spec = {'x': parsing_ops.FixedLenFeature([1], dtypes.float32)}
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 8: make_parsing_export_strategy
def make_parsing_export_strategy(feature_columns,
                                 default_output_alternative_key=None,
                                 assets_extra=None,
                                 as_text=False,
                                 exports_to_keep=5,
                                 target_core=False,
                                 strip_default_attrs=False):
  # pylint: disable=line-too-long
  """Create an ExportStrategy for use with Experiment, using `FeatureColumn`s.

  Creates a SavedModel export that expects to be fed with a single string
  Tensor containing serialized tf.Examples.  At serving time, incoming
  tf.Examples will be parsed according to the provided `FeatureColumn`s.

  Args:
    feature_columns: An iterable of `FeatureColumn`s representing the features
      that must be provided at serving time (excluding labels!).
    default_output_alternative_key: the name of the head to serve when an
      incoming serving request does not explicitly request a specific head.
      Must be `None` if the estimator inherits from ${tf.estimator.Estimator}
      or for single-headed models.
    assets_extra: A dict specifying how to populate the assets.extra directory
      within the exported SavedModel.  Each key should give the destination
      path (including the filename) relative to the assets.extra directory.
      The corresponding value gives the full path of the source file to be
      copied.  For example, the simple case of copying a single file without
      renaming it is specified as
      `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
    as_text: whether to write the SavedModel proto in text format.
    exports_to_keep: Number of exports to keep.  Older exports will be
      garbage-collected.  Defaults to 5.  Set to None to disable garbage
      collection.
    target_core: If True, prepare an ExportStrategy for use with
      tensorflow.python.estimator.*.  If False (default), prepare an
      ExportStrategy for use with tensorflow.contrib.learn.python.learn.*.
    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
      removed from the NodeDefs. For a detailed guide, see
      [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).

  Returns:
    An ExportStrategy that can be passed to the Experiment constructor.
  """
  # pylint: enable=line-too-long
  feature_spec = feature_column.create_feature_spec_for_parsing(
      feature_columns)
  if target_core:
    serving_input_fn = (
        core_export.build_parsing_serving_input_receiver_fn(feature_spec))
  else:
    serving_input_fn = (
        input_fn_utils.build_parsing_serving_input_fn(feature_spec))
  return make_export_strategy(
      serving_input_fn,
      default_output_alternative_key=default_output_alternative_key,
      assets_extra=assets_extra,
      as_text=as_text,
      exports_to_keep=exports_to_keep,
      strip_default_attrs=strip_default_attrs)
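This helper is the bridge between the two estimator generations: with target_core=True it routes through build_parsing_serving_input_receiver_fn, otherwise through the older contrib input-fn builder. A brief usage sketch, assuming the TF 1.x contrib.learn APIs; the feature column and the commented Experiment wiring are illustrative placeholders, not from the source above.

# Usage sketch (assumes tensorflow.contrib.learn-era APIs; illustrative only).
from tensorflow.contrib.layers import real_valued_column
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils

feature_columns = [real_valued_column('x', dimension=1)]
# target_core=True selects build_parsing_serving_input_receiver_fn, so the
# resulting strategy targets tensorflow.python.estimator.* estimators.
export_strategy = saved_model_export_utils.make_parsing_export_strategy(
    feature_columns, exports_to_keep=3, target_core=True)
# experiment = Experiment(..., export_strategies=[export_strategy])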
Example 9: test_complete_flow_with_mode
def test_complete_flow_with_mode(self, distribution):
  label_dimension = 2
  input_dimension = label_dimension
  batch_size = 10
  data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
  data = data.reshape(batch_size, label_dimension)
  train_input_fn = self.dataset_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size // len(distribution.worker_devices),
      shuffle=True)
  eval_input_fn = self.dataset_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size // len(distribution.worker_devices),
      shuffle=False)
  predict_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, batch_size=batch_size, shuffle=False)

  linear_feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))
  ]
  dnn_feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))
  ]
  feature_columns = linear_feature_columns + dnn_feature_columns
  estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=linear_feature_columns,
      dnn_hidden_units=(2, 2),
      dnn_feature_columns=dnn_feature_columns,
      label_dimension=label_dimension,
      model_dir=self._model_dir,
      # TODO(isaprykin): Work around the colocate_with error.
      dnn_optimizer=adagrad.AdagradOptimizer(0.001),
      linear_optimizer=adagrad.AdagradOptimizer(0.001),
      config=run_config.RunConfig(
          train_distribute=distribution, eval_distribute=distribution))

  num_steps = 10
  estimator.train(train_input_fn, steps=num_steps)

  scores = estimator.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  predictions = np.array([
      x[prediction_keys.PredictionKeys.PREDICTIONS]
      for x in estimator.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, label_dimension), predictions.shape)

  feature_spec = feature_column.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                           serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 10: test_complete_flow
def test_complete_flow(self):
  label_dimension = 2
  batch_size = 10
  feature_columns = [feature_column.numeric_column('x', shape=(2,))]
  est = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=feature_columns,
      label_dimension=label_dimension,
      model_dir=self._model_dir)
  data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
  data = data.reshape(batch_size, label_dimension)

  # TRAIN
  # learn y = x
  train_input_fn = numpy_io.numpy_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size,
      num_epochs=None,
      shuffle=True)
  num_steps = 200
  est.train(train_input_fn, steps=num_steps)

  # EVALUATE
  eval_input_fn = numpy_io.numpy_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size,
      shuffle=False)
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  # PREDICT
  predict_input_fn = numpy_io.numpy_input_fn(
      x={'x': data},
      batch_size=batch_size,
      shuffle=False)
  predictions = np.array([
      x[prediction_keys.PredictionKeys.PREDICTIONS]
      for x in est.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, label_dimension), predictions.shape)
  # TODO(ptucker): Deterministic test for predicted values?

  # EXPORT
  feature_spec = feature_column.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 11: test_build_parsing_serving_input_receiver_fn
def test_build_parsing_serving_input_receiver_fn(self):
  feature_spec = {"int_feature": parsing_ops.VarLenFeature(dtypes.int64),
                  "float_feature": parsing_ops.VarLenFeature(dtypes.float32)}
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  with ops.Graph().as_default():
    serving_input_receiver = serving_input_receiver_fn()
    self.assertEqual(set(["int_feature", "float_feature"]),
                     set(serving_input_receiver.features.keys()))
    self.assertEqual(set(["examples"]),
                     set(serving_input_receiver.receiver_tensors.keys()))

    example = example_pb2.Example()
    text_format.Parse("features: { "
                      "  feature: { "
                      "    key: 'int_feature' "
                      "    value: { "
                      "      int64_list: { "
                      "        value: [ 21, 2, 5 ] "
                      "      } "
                      "    } "
                      "  } "
                      "  feature: { "
                      "    key: 'float_feature' "
                      "    value: { "
                      "      float_list: { "
                      "        value: [ 525.25 ] "
                      "      } "
                      "    } "
                      "  } "
                      "} ", example)

    with self.test_session() as sess:
      sparse_result = sess.run(
          serving_input_receiver.features,
          feed_dict={
              serving_input_receiver.receiver_tensors["examples"].name:
              [example.SerializeToString()]})
      self.assertAllEqual([[0, 0], [0, 1], [0, 2]],
                          sparse_result["int_feature"].indices)
      self.assertAllEqual([21, 2, 5],
                          sparse_result["int_feature"].values)
      self.assertAllEqual([[0, 0]],
                          sparse_result["float_feature"].indices)
      self.assertAllEqual([525.25],
                          sparse_result["float_feature"].values)
Example 12: testTrainEvaluateWithDnnForInputAndTreeForPredict
def testTrainEvaluateWithDnnForInputAndTreeForPredict(self):
  head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
      loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

  learner_config = learner_pb2.LearnerConfig()
  learner_config.num_classes = 2
  learner_config.constraints.max_tree_depth = 3
  model_dir = tempfile.mkdtemp()
  config = run_config.RunConfig()

  est = estimator.CoreDNNBoostedTreeCombinedEstimator(
      head=head_fn,
      dnn_hidden_units=[1],
      dnn_feature_columns=[core_feature_column.numeric_column("x")],
      tree_learner_config=learner_config,
      num_trees=1,
      tree_examples_per_layer=3,
      model_dir=model_dir,
      config=config,
      dnn_steps_to_train=10,
      dnn_input_layer_to_tree=True,
      predict_with_tree_only=True,
      dnn_to_tree_distillation_param=(0.5, None),
      tree_feature_columns=[])

  # Train for a few steps.
  est.train(input_fn=_train_input_fn, steps=1000)
  res = est.evaluate(input_fn=_eval_input_fn, steps=1)
  self.assertLess(0.5, res["auc"])
  est.predict(input_fn=_eval_input_fn)

  serving_input_fn = (
      export.build_parsing_serving_input_receiver_fn(
          feature_spec={"x": parsing_ops.FixedLenFeature(
              [1], dtype=dtypes.float32)}))
  base_exporter = exporter.FinalExporter(
      name="Servo",
      serving_input_receiver_fn=serving_input_fn,
      assets_extra=None)
  export_path = os.path.join(model_dir, "export")
  base_exporter.export(
      est,
      export_path=export_path,
      checkpoint_path=None,
      eval_result={},
      is_the_final_export=True)
Example 13: test_scaffold_is_used_for_local_init
def test_scaffold_is_used_for_local_init(self):
  tmpdir = tempfile.mkdtemp()

  def _model_fn_scaffold(features, labels, mode):
    _, _ = features, labels
    my_int = variables.Variable(1, name='my_int',
                                collections=[ops.GraphKeys.LOCAL_VARIABLES])
    scores = constant_op.constant([3.])
    with ops.control_dependencies(
        [variables.local_variables_initializer(),
         data_flow_ops.tables_initializer()]):
      assign_op = state_ops.assign(my_int, 12345)

    # local_init_op must be an Operation, not a Tensor.
    custom_local_init_op = control_flow_ops.group(assign_op)
    return model_fn_lib.EstimatorSpec(
        mode=mode,
        predictions=constant_op.constant([[1.]]),
        loss=constant_op.constant(0.),
        train_op=constant_op.constant(0.),
        scaffold=training.Scaffold(local_init_op=custom_local_init_op),
        export_outputs={'test': export_output.ClassificationOutput(scores)})

  est = estimator.Estimator(model_fn=_model_fn_scaffold)
  est.train(dummy_input_fn, steps=1)
  feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                  'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)

  # Perform the export.
  export_dir_base = os.path.join(
      compat.as_bytes(tmpdir), compat.as_bytes('export'))
  export_dir = est.export_savedmodel(export_dir_base,
                                     serving_input_receiver_fn)

  # Restore, to validate that the custom local_init_op runs.
  with ops.Graph().as_default() as graph:
    with session.Session(graph=graph) as sess:
      loader.load(sess, [tag_constants.SERVING], export_dir)
      my_int = graph.get_tensor_by_name('my_int:0')
      my_int_value = sess.run(my_int)
      self.assertEqual(12345, my_int_value)
Example 14: test_export_savedmodel_with_saveables_proto_roundtrip
def test_export_savedmodel_with_saveables_proto_roundtrip(self):
  tmpdir = tempfile.mkdtemp()
  est = estimator.Estimator(
      model_fn=_model_fn_with_saveables_for_export_tests)
  est.train(input_fn=dummy_input_fn, steps=1)
  feature_spec = {'x': parsing_ops.VarLenFeature(dtype=dtypes.int64),
                  'y': parsing_ops.VarLenFeature(dtype=dtypes.int64)}
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)

  # Perform the export.
  export_dir_base = os.path.join(
      compat.as_bytes(tmpdir), compat.as_bytes('export'))
  export_dir = est.export_savedmodel(
      export_dir_base, serving_input_receiver_fn)

  # Check that all the files are in the right places.
  self.assertTrue(gfile.Exists(export_dir_base))
  self.assertTrue(gfile.Exists(export_dir))
  self.assertTrue(gfile.Exists(os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes('saved_model.pb'))))
  self.assertTrue(gfile.Exists(os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes('variables'))))
  self.assertTrue(gfile.Exists(os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes('variables/variables.index'))))
  self.assertTrue(gfile.Exists(os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes('variables/variables.data-00000-of-00001'))))

  # Restore, to validate that the export was well-formed.
  with ops.Graph().as_default() as graph:
    with session.Session(graph=graph) as sess:
      loader.load(sess, [tag_constants.SERVING], export_dir)
      graph_ops = [x.name for x in graph.get_operations()]
      self.assertTrue('input_example_tensor' in graph_ops)
      self.assertTrue('ParseExample/ParseExample' in graph_ops)
      self.assertTrue('save/LookupTableImport' in graph_ops)

  # Clean up.
  gfile.DeleteRecursively(tmpdir)
Example 15: test_complete_flow
def test_complete_flow(self):
  label_dimension = 2
  batch_size = 10
  feature_columns = [
      feature_column_lib.numeric_column('x', shape=(2,))
  ]
  est = linear.LinearRegressor(
      feature_columns=feature_columns, label_dimension=label_dimension,
      model_dir=self._model_dir)
  data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
  data = data.reshape(batch_size, label_dimension)

  # TRAIN
  # learn y = x
  train_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=data, batch_size=batch_size, num_epochs=None,
      shuffle=True)
  est.train(train_input_fn, steps=200)

  # EVALUATE
  eval_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=data, batch_size=batch_size, num_epochs=1,
      shuffle=False)
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

  # PREDICT
  predict_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, y=None, batch_size=batch_size, num_epochs=1,
      shuffle=False)
  predictions = list(
      [x['predictions'] for x in est.predict(predict_input_fn)])
  self.assertAllClose(data, predictions, atol=0.01)

  # EXPORT
  feature_spec = feature_column_lib.make_parse_example_spec(
      feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))