This page collects typical usage examples of the numeric_column function from the Python module tensorflow.python.feature_column.feature_column. If you have been wondering what exactly numeric_column does, or how to use it in practice, the curated examples below should help.
The following presents 15 code examples of numeric_column, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
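Before diving in, here is a minimal sketch of the most common call patterns (the feature names and shapes are illustrative only, not taken from the examples below):

from tensorflow.python.feature_column import feature_column as fc

# A scalar float feature read from the key 'price' in the features dict.
price = fc.numeric_column('price')
# A fixed-length vector feature; shape sets the expected dimensions.
size = fc.numeric_column('size', shape=(2,))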
Example 1: test_weight_column_should_not_be_used_as_feature
def test_weight_column_should_not_be_used_as_feature(self):
  with self.assertRaisesRegexp(ValueError,
                               'weight_column should not be used as feature'):
    parsing_utils.classifier_parse_example_spec(
        feature_columns=[fc.numeric_column('a')],
        label_key='b',
        weight_column=fc.numeric_column('a'))
Example 2: test_parse_features
def test_parse_features(self):
  """Tests the various behaviours of kmeans._parse_features_if_necessary."""

  # No-op if a tensor is passed in.
  features = constant_op.constant(self.points)
  parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
  self.assertAllEqual(features, parsed_features)

  # All values from a feature dict are transformed into a tensor.
  feature_dict = {
      'x': [[point[0]] for point in self.points],
      'y': [[point[1]] for point in self.points]
  }
  parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
      feature_dict, None)
  self._parse_feature_dict_helper(features, parsed_feature_dict)

  # Only the feature_columns of a feature dict are transformed into a tensor.
  feature_dict_with_extras = {
      'foo': 'bar',
      'x': [[point[0]] for point in self.points],
      'baz': {'fizz': 'buzz'},
      'y': [[point[1]] for point in self.points]
  }
  feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
  parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
      feature_dict_with_extras, feature_columns)
  self._parse_feature_dict_helper(features, parsed_feature_dict)
Example 3: test_multi_feature_column
def test_multi_feature_column(self):
  # Create checkpoint: num_inputs=2, hidden_units=(2, 2), num_outputs=1.
  global_step = 100
  _create_checkpoint((
      (((1., 2.), (3., 4.),), (5., 6.)),
      (((7., 8.), (9., 8.),), (7., 6.)),
      (((5.,), (4.,),), (3.,))
  ), global_step, self._model_dir)

  # Create DNNRegressor and evaluate.
  dnn_regressor = dnn.DNNRegressor(
      hidden_units=(2, 2),
      feature_columns=(feature_column.numeric_column('age'),
                       feature_column.numeric_column('height')),
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      x={'age': np.array(((20,), (40,))), 'height': np.array(((4,), (8,)))},
      y=np.array(((213.,), (421.,))),
      batch_size=2,
      shuffle=False)
  self.assertAllClose({
      # TODO(ptucker): Point to tool for calculating a neural net output?
      # predictions = 7315, 13771
      # loss = (213-7315)^2 + (421-13771)^2 = 228660904
      #      (reported as 228660896 due to float32 rounding)
      metric_keys.MetricKeys.LOSS: 228660896.,
      # average_loss = loss / 2 = 114330452
      metric_keys.MetricKeys.LOSS_MEAN: 114330452.,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_regressor.evaluate(input_fn=input_fn, steps=1))
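The TODO in the assertion above asks for a tool to compute the network output. As a rough substitute, here is an illustrative numpy sketch (not part of the original test) that reproduces the expected predictions from the checkpoint weights:

import numpy as np

relu = lambda v: np.maximum(v, 0.)
# Weights and biases copied from the _create_checkpoint call above.
w0, b0 = np.array([[1., 2.], [3., 4.]]), np.array([5., 6.])
w1, b1 = np.array([[7., 8.], [9., 8.]]), np.array([7., 6.])
w2, b2 = np.array([[5.], [4.]]), np.array([3.])

x = np.array([[20., 4.], [40., 8.]])  # columns: age, height
predictions = relu(relu(x @ w0 + b0) @ w1 + b1) @ w2 + b2
print(predictions)  # [[7315.], [13771.]]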
Example 4: _get_estimator
def _get_estimator(self,
                   train_distribute,
                   eval_distribute,
                   remote_cluster=None):
  input_dimension = LABEL_DIMENSION
  linear_feature_columns = [
      feature_column.numeric_column("x", shape=(input_dimension,))
  ]
  dnn_feature_columns = [
      feature_column.numeric_column("x", shape=(input_dimension,))
  ]
  return dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=linear_feature_columns,
      dnn_hidden_units=(2, 2),
      dnn_feature_columns=dnn_feature_columns,
      label_dimension=LABEL_DIMENSION,
      model_dir=self._model_dir,
      dnn_optimizer=adagrad.AdagradOptimizer(0.001),
      linear_optimizer=adagrad.AdagradOptimizer(0.001),
      config=run_config_lib.RunConfig(
          experimental_distribute=DistributeConfig(
              train_distribute=train_distribute,
              eval_distribute=eval_distribute,
              remote_cluster=remote_cluster)))
Example 5: test_complete_flow_with_mode
def test_complete_flow_with_mode(self, distribution):
  label_dimension = 2
  input_dimension = label_dimension
  batch_size = 10
  data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
  data = data.reshape(batch_size, label_dimension)
  train_input_fn = self.dataset_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size // len(distribution.worker_devices),
      shuffle=True)
  eval_input_fn = self.dataset_input_fn(
      x={'x': data},
      y=data,
      batch_size=batch_size // len(distribution.worker_devices),
      shuffle=False)
  predict_input_fn = numpy_io.numpy_input_fn(
      x={'x': data}, batch_size=batch_size, shuffle=False)

  linear_feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))
  ]
  dnn_feature_columns = [
      feature_column.numeric_column('x', shape=(input_dimension,))
  ]
  feature_columns = linear_feature_columns + dnn_feature_columns
  estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=linear_feature_columns,
      dnn_hidden_units=(2, 2),
      dnn_feature_columns=dnn_feature_columns,
      label_dimension=label_dimension,
      model_dir=self._model_dir,
      # TODO(isaprykin): Work around the colocate_with error.
      dnn_optimizer=adagrad.AdagradOptimizer(0.001),
      linear_optimizer=adagrad.AdagradOptimizer(0.001),
      config=run_config.RunConfig(
          train_distribute=distribution, eval_distribute=distribution))

  num_steps = 10
  estimator.train(train_input_fn, steps=num_steps)

  scores = estimator.evaluate(eval_input_fn)
  self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn('loss', six.iterkeys(scores))

  predictions = np.array([
      x[prediction_keys.PredictionKeys.PREDICTIONS]
      for x in estimator.predict(predict_input_fn)
  ])
  self.assertAllEqual((batch_size, label_dimension), predictions.shape)

  feature_spec = feature_column.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                           serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))
Example 6: test_weight_column_as_numeric_column
def test_weight_column_as_numeric_column(self):
  parsing_spec = parsing_utils.classifier_parse_example_spec(
      feature_columns=[fc.numeric_column('a')],
      label_key='b',
      weight_column=fc.numeric_column('c'))
  expected_spec = {
      'a': parsing_ops.FixedLenFeature((1,), dtype=dtypes.float32),
      'b': parsing_ops.FixedLenFeature((1,), dtype=dtypes.int64),
      'c': parsing_ops.FixedLenFeature((1,), dtype=dtypes.float32),
  }
  self.assertDictEqual(expected_spec, parsing_spec)
Example 7: test_multi_feature_column_multi_dim_logits
def test_multi_feature_column_multi_dim_logits(self):
  """Tests multiple feature columns and multi-dimensional logits.

  All numbers are the same as test_multi_dim_input_multi_dim_logits. The only
  difference is that the input consists of two 1D feature columns, instead of
  one 2D feature column.
  """
  base_global_step = 100
  create_checkpoint((([[.6, .5], [-.6, -.5]],
                      [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
                     ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
                    base_global_step, self._model_dir)
  hidden_units = (2, 2)
  logits_dimension = 3
  inputs = ([[10.]], [[8.]])
  expected_logits = [[-0.48, 0.48, 0.39]]

  for mode in [
      model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
      model_fn.ModeKeys.PREDICT
  ]:
    with ops.Graph().as_default():
      training_util.create_global_step()
      head = mock_head(
          self,
          hidden_units=hidden_units,
          logits_dimension=logits_dimension,
          expected_logits=expected_logits)
      estimator_spec = self._dnn_model_fn(
          features={
              'age': constant_op.constant(inputs[0]),
              'height': constant_op.constant(inputs[1])
          },
          labels=constant_op.constant([[1]]),
          mode=mode,
          head=head,
          hidden_units=hidden_units,
          feature_columns=[
              feature_column.numeric_column('age'),
              feature_column.numeric_column('height')
          ],
          optimizer=mock_optimizer(self, hidden_units))
      with monitored_session.MonitoredTrainingSession(
          checkpoint_dir=self._model_dir) as sess:
        if mode == model_fn.ModeKeys.TRAIN:
          sess.run(estimator_spec.train_op)
        elif mode == model_fn.ModeKeys.EVAL:
          sess.run(estimator_spec.loss)
        elif mode == model_fn.ModeKeys.PREDICT:
          sess.run(estimator_spec.predictions)
        else:
          self.fail('Invalid mode: {}'.format(mode))
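As an aside (not part of the original test), expected_logits can be re-derived from the checkpoint weights with a short numpy forward pass:

import numpy as np

relu = lambda v: np.maximum(v, 0.)
# Weights and biases copied from the create_checkpoint call above.
w0, b0 = np.array([[.6, .5], [-.6, -.5]]), np.array([.1, -.1])
w1, b1 = np.array([[1., .8], [-.8, -1.]]), np.array([.2, -.2])
w2, b2 = np.array([[-1., 1., .5], [-1., 1., .5]]), np.array([.3, -.3, .0])

x = np.array([[10., 8.]])  # columns: age, height
print(relu(relu(x @ w0 + b0) @ w1 + b1) @ w2 + b2)  # [[-0.48  0.48  0.39]]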
Example 8: test_multi_dim
def test_multi_dim(self):
  """Asserts evaluation metrics for multi-dimensional input and logits."""
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5], [-.6, -.5]], [.1, -.1]),
       ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
      global_step, self._model_dir)
  n_classes = 3
  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age', shape=[2])],
      n_classes=n_classes,
      model_dir=self._model_dir)

  def _input_fn():
    # batch_size = 2, one false label, and one true.
    return {'age': [[10., 8.], [10., 8.]]}, [[1], [0]]

  # Uses identical numbers as
  # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-0.48, 0.48, 0.39], [-0.48, 0.48, 0.39]]
  # probabilities = exp(logits)/sum(exp(logits))
  #               = [[0.16670536, 0.43538380, 0.39791084],
  #                  [0.16670536, 0.43538380, 0.39791084]]
  # loss = -log(0.43538380) - log(0.16670536)
  expected_loss = 2.62305466
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
      metric_keys.MetricKeys.ACCURACY: 0.5,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
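For reference, a small numpy sketch (not from the original test) re-derives expected_loss by following the softmax arithmetic in the comments above:

import numpy as np

logits = np.array([[-0.48, 0.48, 0.39],
                   [-0.48, 0.48, 0.39]])
labels = [1, 0]  # true class index per example
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
loss = -np.log(probs[[0, 1], labels]).sum()  # summed over the batch
print(loss)  # ~2.62305466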
Example 9: test_one_dim
def test_one_dim(self):
  """Asserts evaluation metrics for one-dimensional input and logits."""
  global_step = 100
  dnn_testing_utils.create_checkpoint(
      (([[.6, .5]], [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
       ([[-1.], [1.]], [.3]),), global_step, self._model_dir)
  dnn_classifier = dnn.DNNClassifier(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age')],
      model_dir=self._model_dir)

  def _input_fn():
    # batch_size = 2, one false label, and one true.
    return {'age': [[10.], [10.]]}, [[1], [0]]

  # Uses identical numbers as DNNModelTest.test_one_dim_logits.
  # See that test for calculation of logits.
  # logits = [[-2.08], [-2.08]] =>
  # logistic = 1/(1 + exp(-logits)) = [[0.11105597], [0.11105597]]
  # loss = -1. * log(0.111) -1. * log(0.889) = 2.31544200
  expected_loss = 2.31544200
  self.assertAllClose({
      metric_keys.MetricKeys.LOSS: expected_loss,
      metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2.,
      metric_keys.MetricKeys.ACCURACY: 0.5,
      metric_keys.MetricKeys.PREDICTION_MEAN: 0.11105597,
      metric_keys.MetricKeys.LABEL_MEAN: 0.5,
      metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
      # There is no good way to calculate AUC for only two data points. But
      # that is what the algorithm returns.
      metric_keys.MetricKeys.AUC: 0.5,
      metric_keys.MetricKeys.AUC_PR: 0.75,
      ops.GraphKeys.GLOBAL_STEP: global_step
  }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
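Likewise, a minimal sketch (not in the original test) verifies expected_loss from the logistic arithmetic in the comments:

import numpy as np

logistic = 1. / (1. + np.exp(2.08))  # sigmoid(-2.08) ~= 0.11105597
# Labels are [1, 0], so the loss sums -log(p) and -log(1 - p).
loss = -np.log(logistic) - np.log(1. - logistic)
print(loss)  # ~2.31544200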
Example 10: test_from_scratch_validate_summary
def test_from_scratch_validate_summary(self):
  hidden_units = (2, 2)
  mock_optimizer = _mock_optimizer(self, hidden_units=hidden_units)
  dnn_classifier = dnn.DNNClassifier(
      hidden_units=hidden_units,
      feature_columns=(feature_column.numeric_column('age'),),
      optimizer=mock_optimizer,
      model_dir=self._model_dir)
  self.assertEqual(0, mock_optimizer.minimize.call_count)

  # Train for a few steps, then validate optimizer, summaries, and
  # checkpoint.
  num_steps = 5
  summary_hook = _SummaryHook()
  dnn_classifier.train(
      input_fn=lambda: ({'age': [[10.]]}, [[1]]), steps=num_steps,
      hooks=(summary_hook,))
  self.assertEqual(1, mock_optimizer.minimize.call_count)
  _assert_checkpoint(
      self, num_steps, input_units=1, hidden_units=hidden_units,
      output_units=1, model_dir=self._model_dir)
  summaries = summary_hook.summaries()
  self.assertEqual(num_steps, len(summaries))
  for summary in summaries:
    summary_keys = [v.tag for v in summary.value]
    self.assertIn(metric_keys.MetricKeys.LOSS, summary_keys)
    self.assertIn(metric_keys.MetricKeys.LOSS_MEAN, summary_keys)
Example 11: test_multi_dim_weights
def test_multi_dim_weights(self):
  """Asserts evaluation metrics for multi-dimensional input and logits."""
  # Same checkpoint as test_multi_dim.
  global_step = 100
  create_checkpoint((([[.6, .5], [-.6, -.5]],
                      [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
                     ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
                    global_step, self._model_dir)
  label_dimension = 3
  dnn_regressor = self._dnn_regressor_fn(
      hidden_units=(2, 2),
      feature_columns=[feature_column.numeric_column('age', shape=[2])],
      label_dimension=label_dimension,
      weight_column='w',
      model_dir=self._model_dir)

  def _input_fn():
    return {'age': [[10., 8.]], 'w': [10.]}, [[1., -1., 0.5]]

  # Uses identical numbers as test_multi_dim.
  # See that test for calculation of logits.
  # loss = 4.3929 * 10
  expected_loss = 43.929
  metrics = dnn_regressor.evaluate(input_fn=_input_fn, steps=1)
  self.assertAlmostEqual(
      expected_loss, metrics[metric_keys.MetricKeys.LOSS], places=3)
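The expected_loss above can be checked with a quick sketch (not from the original test): a weighted sum of squared errors for a single example with weight 10:

import numpy as np

logits = np.array([-0.48, 0.48, 0.39])  # from the shared checkpoint
labels = np.array([1., -1., 0.5])
print(10. * np.sum((labels - logits) ** 2))  # 43.929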
Example 12: test_ar_lstm_regressor
def test_ar_lstm_regressor(self):
  dtype = dtypes.float32
  model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
  exogenous_feature_columns = (
      feature_column.numeric_column("exogenous"),
  )
  estimator = estimators.LSTMAutoRegressor(
      periodicities=10,
      input_window_size=10,
      output_window_size=6,
      model_dir=model_dir,
      num_features=1,
      extra_feature_columns=exogenous_feature_columns,
      num_units=10,
      config=_SeedRunConfig())
  times = numpy.arange(20, dtype=numpy.int64)
  values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
  exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
  features = {
      feature_keys.TrainEvalFeatures.TIMES: times,
      feature_keys.TrainEvalFeatures.VALUES: values,
      "exogenous": exogenous
  }
  train_input_fn = input_pipeline.RandomWindowInputFn(
      input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
      batch_size=16, window_size=16)
  eval_input_fn = input_pipeline.RandomWindowInputFn(
      input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
      batch_size=16, window_size=16)
  estimator.train(input_fn=train_input_fn, steps=1)
  evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
  self.assertAllEqual(evaluation["loss"], evaluation["average_loss"])
  self.assertAllEqual([], evaluation["loss"].shape)
Example 13: test_dnn_and_linear_logits_are_added
def test_dnn_and_linear_logits_are_added(self):
  with ops.Graph().as_default():
    variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
    variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
    variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
    variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
    variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
    variables_lib.Variable([6.0], name='dnn/logits/bias')
    variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
    linear_testing_utils.save_variables_to_ckpt(self._model_dir)

  x_column = feature_column.numeric_column('x')
  est = dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=[x_column],
      dnn_hidden_units=[1],
      dnn_feature_columns=[x_column],
      model_dir=self._model_dir)
  input_fn = numpy_io.numpy_input_fn(
      x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
  # linear logits = 10*1 + 2 = 12
  # dnn logits = (10*3 + 4)*5 + 6 = 176
  # logits = dnn + linear = 176 + 12 = 188
  self.assertAllClose(
      {
          prediction_keys.PredictionKeys.PREDICTIONS: [188.],
      },
      next(est.predict(input_fn=input_fn)))
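The comment arithmetic above is easy to reproduce by hand; a tiny sketch (not from the original test):

linear_logits = 10. * 1. + 2.      # x * linear weight + bias = 12
hidden = max(10. * 3. + 4., 0.)    # ReLU is a no-op here since 34 > 0
dnn_logits = hidden * 5. + 6.      # = 176
print(linear_logits + dnn_logits)  # 188.0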
Example 14: _test_parsed_sequence_example
def _test_parsed_sequence_example(
    self, col_name, col_fn, col_arg, shape, values):
  """Helper function to check that each FeatureColumn parses correctly.

  Args:
    col_name: string, name to give to the feature column. Should match
      the name that the column will parse out of the features dict.
    col_fn: function used to create the feature column. For example,
      sequence_numeric_column.
    col_arg: second arg that the target feature column is expecting.
    shape: the expected dense_shape of the feature after parsing into
      a SparseTensor.
    values: the expected values at index [0, 2, 6] of the feature
      after parsing into a SparseTensor.
  """
  example = _make_sequence_example()
  columns = [
      fc.categorical_column_with_identity('int_ctx', num_buckets=100),
      fc.numeric_column('float_ctx'),
      col_fn(col_name, col_arg)
  ]
  context, seq_features = parsing_ops.parse_single_sequence_example(
      example.SerializeToString(),
      context_features=fc.make_parse_example_spec(columns[:2]),
      sequence_features=fc.make_parse_example_spec(columns[2:]))
  with self.cached_session() as sess:
    ctx_result, seq_result = sess.run([context, seq_features])
    self.assertEqual(list(seq_result[col_name].dense_shape), shape)
    self.assertEqual(
        list(seq_result[col_name].values[[0, 2, 6]]), values)
    self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
    self.assertEqual(ctx_result['int_ctx'].values[0], 5)
    self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
    self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
Example 15: _test_complete_flow
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                        input_dimension, label_dimension, prediction_length):
  feature_columns = [
      feature_column_lib.numeric_column('x', shape=(input_dimension,))
  ]
  est = _baseline_estimator_fn(
      label_dimension=label_dimension,
      model_dir=self._model_dir)

  # TRAIN
  # learn y = x
  est.train(train_input_fn, steps=200)

  # EVALUATE
  scores = est.evaluate(eval_input_fn)
  self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
  self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

  # PREDICT
  predictions = np.array(
      [x['predictions'] for x in est.predict(predict_input_fn)])
  self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

  # EXPORT
  feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
  serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
      feature_spec)
  export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                     serving_input_receiver_fn)
  self.assertTrue(gfile.Exists(export_dir))