

Python feature_column.embedding_column function code examples

This article collects typical usage examples of the tensorflow.contrib.layers.python.layers.feature_column.embedding_column function in Python. If you are wondering how to use the Python embedding_column function, how to call it, or what working examples look like, the curated code examples below may help.


A total of 15 code examples of the embedding_column function are shown below, sorted roughly by popularity.
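
As a quick orientation before the examples, here is a minimal sketch of the pattern most of these snippets share: hash a string feature into buckets with sparse_column_with_hash_bucket, then wrap it in embedding_column so the model learns a dense vector per bucket. The feature name "language" and the sizes used here are arbitrary illustrations, not taken from any example below.

  # Minimal usage sketch (illustrative names and sizes, not from the examples).
  from tensorflow.contrib.layers.python.layers import feature_column as fc

  # Hash the string feature "language" into 100 buckets.
  sparse_col = fc.sparse_column_with_hash_bucket(
      "language", hash_bucket_size=100)
  # Learn an 8-dimensional embedding per bucket; "mean" averages the embeddings
  # when an example carries several values for this feature.
  embedding_col = fc.embedding_column(sparse_col, dimension=8, combiner="mean")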

Example 1: testInitEmbeddingColumnWeightsFromCkpt

  def testInitEmbeddingColumnWeightsFromCkpt(self):
    sparse_col = fc.sparse_column_with_hash_bucket(
        column_name="object_in_image", hash_bucket_size=4)
    # Create _EmbeddingColumn which randomly initializes embedding of size
    # [4, 16].
    embedding_col = fc.embedding_column(sparse_col, dimension=16)

    # Creating a SparseTensor which has all the ids possible for the given
    # vocab.
    input_tensor = sparse_tensor_lib.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 2], [3, 3]],
        values=[0, 1, 2, 3],
        dense_shape=[4, 4])

    # Invoking 'layers.input_from_feature_columns' will create the embedding
    # variable. Creating under scope 'run_1' so as to prevent name conflicts
    # when creating embedding variable for 'embedding_column_pretrained'.
    with variable_scope.variable_scope("run_1"):
      with variable_scope.variable_scope(embedding_col.name):
        # This will return a [4, 16] tensor which is the same as the
        # embedding variable.
        embeddings = feature_column_ops.input_from_feature_columns({
            embedding_col: input_tensor
        }, [embedding_col])

    save = saver.Saver()
    ckpt_dir_prefix = os.path.join(self.get_temp_dir(),
                                   "init_embedding_col_w_from_ckpt")
    ckpt_dir = tempfile.mkdtemp(prefix=ckpt_dir_prefix)
    checkpoint_path = os.path.join(ckpt_dir, "model.ckpt")

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      saved_embedding = embeddings.eval()
      save.save(sess, checkpoint_path)

    embedding_col_initialized = fc.embedding_column(
        sparse_id_column=sparse_col,
        dimension=16,
        ckpt_to_load_from=checkpoint_path,
        tensor_name_in_ckpt=("run_1/object_in_image_embedding/"
                             "input_from_feature_columns/object"
                             "_in_image_embedding/weights"))

    with variable_scope.variable_scope("run_2"):
      # This will initialize the embedding from the provided checkpoint and
      # return a [4, 16] tensor which is the same as the embedding variable.
      # Since we didn't modify the embeddings, this should be the same as
      # 'saved_embedding'.
      pretrained_embeddings = feature_column_ops.input_from_feature_columns({
          embedding_col_initialized: input_tensor
      }, [embedding_col_initialized])

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      loaded_embedding = pretrained_embeddings.eval()

    self.assertAllClose(saved_embedding, loaded_embedding)
Developer: Jackhuang945, Project: tensorflow, Lines of code: 56, Source: feature_column_test.py

Example 2: testRegression_TensorData

  def testRegression_TensorData(self):
    """Tests regression using tensor data as input."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        feature_column.embedding_column(
            language_column, dimension=1),
        feature_column.real_valued_column('age')
    ]

    regressor = dnn.DNNRegressor(
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=200)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores)
Developer: willdzeng, Project: tensorflow, Lines of code: 35, Source: dnn_test.py

Example 3: testExport

  def testExport(self):
    """Tests export model for servo."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    feature_columns = [
        feature_column.real_valued_column('age'),
        feature_column.embedding_column(
            language, dimension=1)
    ]

    classifier = dnn.DNNClassifier(
        feature_columns=feature_columns, hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=5)

    export_dir = tempfile.mkdtemp()
    classifier.export(export_dir)
Developer: willdzeng, Project: tensorflow, Lines of code: 25, Source: dnn_test.py

Example 4: testEmbeddingColumn

  def testEmbeddingColumn(self):
    a = fc.sparse_column_with_hash_bucket(
        "aaa", hash_bucket_size=100, combiner="sum")
    b = fc.embedding_column(a, dimension=4, combiner="mean")
    self.assertEqual(b.sparse_id_column.name, "aaa")
    self.assertEqual(b.dimension, 4)
    self.assertEqual(b.combiner, "mean")
Developer: Jackhuang945, Project: tensorflow, Lines of code: 7, Source: feature_column_test.py

Example 5: testExport

  def testExport(self):
    """Tests export model for servo."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    feature_columns = [
        feature_column.real_valued_column('age'),
        feature_column.embedding_column(
            language, dimension=1)
    ]

    classifier = debug.DebugClassifier(config=run_config.RunConfig(
        tf_random_seed=1))
    classifier.fit(input_fn=input_fn, steps=5)

    def default_input_fn(unused_estimator, examples):
      return feature_column_ops.parse_feature_columns_from_examples(
          examples, feature_columns)

    export_dir = tempfile.mkdtemp()
    classifier.export(export_dir, input_fn=default_input_fn)
Developer: eduardofv, Project: tensorflow, Lines of code: 29, Source: debug_test.py

Example 6: testMultipliesGradient

  def testMultipliesGradient(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    embedding_wire = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('wire', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))

    params = {
        'feature_columns': [embedding_language, embedding_wire],
        'head': head_lib._multi_class_head(2),
        'hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            embedding_language: 0.0
        },
    }
    features = {
        'language':
            sparse_tensor.SparseTensor(
                values=['en', 'fr', 'zh'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
    }
    labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
    model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
                                  params)
    with monitored_session.MonitoredSession() as sess:
      language_var = dnn_linear_combined._get_embedding_variable(
          embedding_language, 'dnn', 'dnn/input_from_feature_columns')
      wire_var = dnn_linear_combined._get_embedding_variable(
          embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
      for _ in range(2):
        _, language_value, wire_value = sess.run(
            [model_ops.train_op, language_var, wire_var])
      initial_value = np.full_like(language_value, 0.1)
      self.assertTrue(np.all(np.isclose(language_value, initial_value)))
      self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
Developer: willdzeng, Project: tensorflow, Lines of code: 45, Source: dnn_test.py

Example 7: testEmbeddingColumnDeepCopy

  def testEmbeddingColumnDeepCopy(self):
    a = fc.sparse_column_with_hash_bucket(
        "aaa", hash_bucket_size=100, combiner="sum")
    column = fc.embedding_column(a, dimension=4, combiner="mean")
    column_copy = copy.deepcopy(column)
    self.assertEqual(column_copy.name, "aaa_embedding")
    self.assertEqual(column_copy.sparse_id_column.name, "aaa")
    self.assertEqual(column_copy.dimension, 4)
    self.assertEqual(column_copy.combiner, "mean")
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 9, Source: feature_column_test.py

Example 8: testEmbeddingMultiplier

  def testEmbeddingMultiplier(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    classifier = dnn.DNNClassifier(
        feature_columns=[embedding_language],
        hidden_units=[3, 3],
        embedding_lr_multipliers={embedding_language: 0.8})
    self.assertEqual({
        embedding_language: 0.8
    }, classifier._estimator.params['embedding_lr_multipliers'])
Developer: willdzeng, Project: tensorflow, Lines of code: 12, Source: dnn_test.py

Example 9: testCreateSequenceFeatureSpec

  def testCreateSequenceFeatureSpec(self):
    sparse_col = fc.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = fc.embedding_column(
        fc.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding", hash_bucket_size=10),
        dimension=4)
    sparse_id_col = fc.sparse_column_with_keys("id_column",
                                               ["marlo", "omar", "stringer"])
    weighted_id_col = fc.weighted_sparse_column(sparse_id_col,
                                                "id_weights_column")
    real_valued_col1 = fc.real_valued_column("real_valued_column", dimension=2)
    real_valued_col2 = fc.real_valued_column(
        "real_valued_default_column", dimension=5, default_value=3.0)
    real_valued_col3 = fc._real_valued_var_len_column(
        "real_valued_var_len_column", default_value=3.0, is_sparse=True)
    real_valued_col4 = fc._real_valued_var_len_column(
        "real_valued_var_len_dense_column", default_value=4.0, is_sparse=False)

    feature_columns = set([
        sparse_col, embedding_col, weighted_id_col, real_valued_col1,
        real_valued_col2, real_valued_col3, real_valued_col4
    ])

    feature_spec = fc._create_sequence_feature_spec_for_parsing(feature_columns)

    expected_feature_spec = {
        "sparse_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "sparse_column_for_embedding":
            parsing_ops.VarLenFeature(dtypes.string),
        "id_column":
            parsing_ops.VarLenFeature(dtypes.string),
        "id_weights_column":
            parsing_ops.VarLenFeature(dtypes.float32),
        "real_valued_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[2], dtype=dtypes.float32, allow_missing=False),
        "real_valued_default_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[5], dtype=dtypes.float32, allow_missing=True),
        "real_valued_var_len_column":
            parsing_ops.VarLenFeature(dtype=dtypes.float32),
        "real_valued_var_len_dense_column":
            parsing_ops.FixedLenSequenceFeature(
                shape=[], dtype=dtypes.float32, allow_missing=True,
                default_value=4.0),
    }

    self.assertDictEqual(expected_feature_spec, feature_spec)
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 50, Source: feature_column_test.py

Example 10: testTrainWithPartitionedVariables

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [.2], [.1]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)

    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    sparse_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)
    feature_columns = [
        feature_column.embedding_column(
            sparse_column, dimension=1)
    ]

    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig(tf_random_seed=1)
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
      config._cluster_spec = server_lib.ClusterSpec({})

    classifier = dnn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=config)

    classifier.fit(input_fn=_input_fn, steps=5)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self._assertInRange(0.0, 1.0, scores['accuracy'])
    self.assertIn('loss', scores)
Developer: willdzeng, Project: tensorflow, Lines of code: 50, Source: dnn_test.py

Example 11: benchmarkLogisticFloatLabel

  def benchmarkLogisticFloatLabel(self):

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant(((50,), (20,), (10,))),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ('en', 'fr', 'zh'), num_epochs=num_epochs),
                  indices=((0, 0), (0, 1), (2, 0)),
                  dense_shape=(3, 2))
      }
      return features, constant_op.constant(
          ((0.8,), (0.,), (0.2,)), dtype=dtypes.float32)

    lang_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    n_classes = 2
    classifier = dnn.DNNClassifier(
        n_classes=n_classes,
        feature_columns=(feature_column.embedding_column(
            lang_column, dimension=1),
                         feature_column.real_valued_column('age')),
        hidden_units=(3, 3),
        config=run_config.RunConfig(tf_random_seed=1))
    steps = 1000
    metrics = classifier.fit(input_fn=_input_fn, steps=steps).evaluate(
        input_fn=_input_fn, steps=1)
    estimator_test_utils.assert_in_range(steps, steps + 5, 'global_step',
                                         metrics)

    # Prediction probabilities mirror the labels column, which proves that the
    # classifier learns from float input.
    self._report_metrics(metrics)
    self._report_predictions(
        classifier=classifier,
        input_fn=functools.partial(_input_fn, num_epochs=1),
        iters=metrics['global_step'],
        n_examples=3,
        n_classes=n_classes,
        expected_probabilities=((0.2, 0.8), (1., 0.), (0.8, 0.2)),
        expected_classes=(1, 0, 0),
        benchmark_name_override=(
            'DNNClassifierBenchmark.benchmarkLogisticFloatLabel_predictions'))
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 47, Source: dnn_benchmark_test.py

Example 12: testPredict_AsIterable

  def testPredict_AsIterable(self):
    """Tests predict and predict_prob methods with as_iterable=True."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [.2], [.1]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        feature_column.embedding_column(
            language_column, dimension=1),
        feature_column.real_valued_column('age')
    ]

    classifier = dnn.DNNClassifier(
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=200)

    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    self._assertInRange(0.0, 1.0, scores['accuracy'])
    self.assertIn('loss', scores)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = list(
        classifier.predict(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertListEqual(predictions, [1, 0, 0])
    predictions = list(
        classifier.predict_proba(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertAllClose(
        predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
Developer: willdzeng, Project: tensorflow, Lines of code: 47, Source: dnn_test.py

Example 13: testPrepareInputsForRnnSparseAndDense

  def testPrepareInputsForRnnSparseAndDense(self):
    num_unroll = 2
    embedding_dimension = 8
    dense_dimension = 2

    expected = [
        np.array([[1., 1., 1., 1., 1., 1., 1., 1., 111., 112.],
                  [1., 1., 1., 1., 1., 1., 1., 1., 211., 212.],
                  [1., 1., 1., 1., 1., 1., 1., 1., 311., 312.]]),
        np.array([[1., 1., 1., 1., 1., 1., 1., 1., 121., 122.],
                  [2., 2., 2., 2., 2., 2., 2., 2., 221., 222.],
                  [1., 1., 1., 1., 1., 1., 1., 1., 321., 322.]])
    ]

    sequence_features = {
        'wire_cast':
            sparse_tensor.SparseTensor(
                indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1],
                         [2, 0, 0], [2, 1, 1]],
                values=[
                    b'marlo', b'stringer', b'omar', b'stringer', b'marlo',
                    b'marlo', b'omar'
                ],
                dense_shape=[3, 2, 2]),
        'seq_feature0':
            constant_op.constant([[[111., 112.], [121., 122.]],
                                  [[211., 212.], [221., 222.]],
                                  [[311., 312.], [321., 322.]]])
    }

    wire_cast = feature_column.sparse_column_with_keys(
        'wire_cast', ['marlo', 'omar', 'stringer'])
    wire_cast_embedded = feature_column.embedding_column(
        wire_cast,
        dimension=embedding_dimension,
        combiner='sum',
        initializer=init_ops.ones_initializer())
    seq_feature0_column = feature_column.real_valued_column(
        'seq_feature0', dimension=dense_dimension)

    sequence_feature_columns = [seq_feature0_column, wire_cast_embedded]

    context_features = None

    self._test_prepare_inputs_for_rnn(sequence_features, context_features,
                                      sequence_feature_columns, num_unroll,
                                      expected)
Developer: finardi, Project: tensorflow, Lines of code: 47, Source: state_saving_rnn_estimator_test.py

Example 14: testTrainSaveLoad

  def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [.2], [.1]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)

    sparse_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        feature_column.embedding_column(
            sparse_column, dimension=1)
    ]

    model_dir = tempfile.mkdtemp()
    classifier = dnn.DNNClassifier(
        model_dir=model_dir,
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=5)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions1 = classifier.predict(input_fn=predict_input_fn)
    del classifier

    classifier2 = dnn.DNNClassifier(
        model_dir=model_dir,
        n_classes=3,
        feature_columns=feature_columns,
        hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))
    predictions2 = classifier2.predict(input_fn=predict_input_fn)
    self.assertEqual(list(predictions1), list(predictions2))
Developer: willdzeng, Project: tensorflow, Lines of code: 46, Source: dnn_test.py

Example 15: setUp

  def setUp(self):
    super(DynamicRnnEstimatorTest, self).setUp()
    self.rnn_cell = core_rnn_cell_impl.BasicRNNCell(self.NUM_RNN_CELL_UNITS)
    self.mock_target_column = MockTargetColumn(
        num_label_columns=self.NUM_LABEL_COLUMNS)

    location = feature_column.sparse_column_with_keys(
        'location', keys=['west_side', 'east_side', 'nyc'])
    location_onehot = feature_column.one_hot_column(location)
    self.context_feature_columns = [location_onehot]

    wire_cast = feature_column.sparse_column_with_keys(
        'wire_cast', ['marlo', 'omar', 'stringer'])
    wire_cast_embedded = feature_column.embedding_column(wire_cast, dimension=8)
    measurements = feature_column.real_valued_column(
        'measurements', dimension=2)
    self.sequence_feature_columns = [measurements, wire_cast_embedded]
Developer: AliMiraftab, Project: tensorflow, Lines of code: 17, Source: dynamic_rnn_estimator_test.py


Note: The tensorflow.contrib.layers.python.layers.feature_column.embedding_column examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Please do not repost without permission.