Python layers.real_valued_column Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.layers.real_valued_column. If you are unsure what real_valued_column does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the real_valued_column function, ordered by popularity by default.
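
Before the examples, here is a minimal usage sketch (not taken from any of the projects below) showing what real_valued_column produces and how it plugs into a tf.contrib.learn estimator. It assumes TensorFlow 1.x with tf.contrib available; the column names, feature values, and the LinearClassifier are illustrative assumptions.

import tensorflow as tf
from tensorflow.contrib import layers, learn

# A real-valued column describes a dense numeric feature; `dimension`
# defaults to 1 and `dtype` to tf.float32.
age = layers.real_valued_column('age')
height = layers.real_valued_column('height', dimension=1)

def input_fn():
  # Features keyed by column name; shapes are [batch_size, dimension].
  features = {
      'age': tf.constant([[25.0], [40.0], [60.0]]),
      'height': tf.constant([[1.80], [1.65], [1.75]]),
  }
  labels = tf.constant([[0], [1], [1]])
  return features, labels

# Real-valued columns can be passed directly to tf.contrib.learn estimators.
classifier = learn.LinearClassifier(feature_columns=[age, height])
classifier.fit(input_fn=input_fn, steps=10)
print(classifier.evaluate(input_fn=input_fn, steps=1))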

Example 1: testLinearlySeparableBinaryDataNoKernels

  def testLinearlySeparableBinaryDataNoKernels(self):
    """Tests classifier w/o kernels (log. regression) for lin-separable data."""

    feature1 = layers.real_valued_column('feature1')
    feature2 = layers.real_valued_column('feature2')

    logreg_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[feature1, feature2])
    logreg_classifier.fit(
        input_fn=_linearly_separable_binary_input_fn, steps=100)

    metrics = logreg_classifier.evaluate(
        input_fn=_linearly_separable_binary_input_fn, steps=1)
    # Since the data is linearly separable, the classifier should have small
    # loss and perfect accuracy.
    self.assertLess(metrics['loss'], 0.1)
    self.assertEqual(metrics['accuracy'], 1.0)

    # As a result, it should assign higher probability to class 1 for the first
    # and third examples and higher probability to class 0 for the second example.
    logreg_prob_predictions = list(
        logreg_classifier.predict_proba(input_fn=
                                        _linearly_separable_binary_input_fn))
    self.assertGreater(logreg_prob_predictions[0][1], 0.5)
    self.assertGreater(logreg_prob_predictions[1][0], 0.5)
    self.assertGreater(logreg_prob_predictions[2][1], 0.5)
Developer: 1000sprites | Project: tensorflow | Lines of code: 26 | Source file: kernel_estimators_test.py

Example 2: get_wide_deep

def get_wide_deep():
  # define column types
  races = ['White', 'Black', 'American Indian', 'Chinese', 
           'Japanese', 'Hawaiian', 'Filipino', 'Unknown',
           'Asian Indian', 'Korean', 'Samaon', 'Vietnamese']
  is_male, mother_age, mother_race, plurality, gestation_weeks, mother_married, cigarette_use, alcohol_use = \
   [ \
    tflayers.sparse_column_with_keys('is_male', keys=['True', 'False']),
    tflayers.real_valued_column('mother_age'),
    tflayers.sparse_column_with_keys('mother_race', keys=races),
    tflayers.real_valued_column('plurality'),
    tflayers.real_valued_column('gestation_weeks'),
    tflayers.sparse_column_with_keys('mother_married', keys=['True', 'False']),
    tflayers.sparse_column_with_keys('cigarette_use', keys=['True', 'False', 'None']),
    tflayers.sparse_column_with_keys('alcohol_use', keys=['True', 'False', 'None'])
    ]

  # Which columns are wide (sparse, with a linear relationship to the output)
  # and which are deep (with a complex relationship to the output)? A usage
  # sketch showing how these lists feed a combined estimator follows this example.
  wide = [is_male, mother_race, plurality, mother_married, cigarette_use, alcohol_use]
  deep = [\
                mother_age,
                gestation_weeks,
                tflayers.embedding_column(mother_race, 3)
               ]
  return wide, deep
Developer: rpc01 | Project: training-data-analyst | Lines of code: 25 | Source file: model.py
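
The wide and deep column lists returned above are typically consumed by a combined linear/DNN estimator. A minimal sketch, assuming tf.contrib.learn is available; the model directory, hidden-unit sizes, and train_input_fn are illustrative assumptions and are not part of the original model.py:

from tensorflow.contrib import learn

wide, deep = get_wide_deep()
estimator = learn.DNNLinearCombinedRegressor(
    model_dir='birthweight_trained',   # illustrative directory
    linear_feature_columns=wide,       # sparse columns -> linear ("wide") part
    dnn_feature_columns=deep,          # real-valued/embedding columns -> DNN ("deep") part
    dnn_hidden_units=[64, 32])         # illustrative layer sizes
# estimator.fit(input_fn=train_input_fn, steps=1000)  # train_input_fn is assumed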

Example 3: testMulticlassDataWithAndWithoutKernels

  def testMulticlassDataWithAndWithoutKernels(self):
    """Tests classifier w/ and w/o kernels on multiclass data."""
    feature_column = layers.real_valued_column('feature', dimension=4)

    # Metrics for linear classifier (no kernels).
    linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[feature_column], n_classes=3)
    linear_classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=50)
    linear_metrics = linear_classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=1)
    linear_loss = linear_metrics['loss']
    linear_accuracy = linear_metrics['accuracy']

    # Using kernel mappers allows the classifier to discover non-linearities in
    # the data (via RBF kernel approximation), which reduces the loss and
    # increases the accuracy.
    kernel_mappers = {
        feature_column: [
            RandomFourierFeatureMapper(
                input_dim=4, output_dim=50, stddev=1.0, name='rffm')
        ]
    }
    kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[], n_classes=3, kernel_mappers=kernel_mappers)
    kernel_linear_classifier.fit(
        input_fn=test_data.iris_input_multiclass_fn, steps=50)
    kernel_linear_metrics = kernel_linear_classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=1)
    kernel_linear_loss = kernel_linear_metrics['loss']
    kernel_linear_accuracy = kernel_linear_metrics['accuracy']
    self.assertLess(kernel_linear_loss, linear_loss)
    self.assertGreater(kernel_linear_accuracy, linear_accuracy)
Developer: 1000sprites | Project: tensorflow | Lines of code: 31 | Source file: kernel_estimators_test.py
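
For reference, the RandomFourierFeatureMapper used above can also be applied to a tensor directly. A minimal sketch, assuming tf.contrib.kernel_methods is available; the random input batch is an illustrative assumption:

import tensorflow as tf
from tensorflow.contrib.kernel_methods import RandomFourierFeatureMapper

# Approximates an RBF kernel by mapping 4-d inputs into a 50-d feature space.
mapper = RandomFourierFeatureMapper(input_dim=4, output_dim=50, stddev=1.0)
x = tf.random_normal([4, 4])     # illustrative batch of four 4-d points
mapped = mapper.map(x)           # shape [4, 50]
with tf.Session() as sess:
  print(sess.run(mapped).shape)  # (4, 50)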

Example 4: get_conv_classifier

def get_conv_classifier():
    n_classes = 5
    feature_columns = [layers.real_valued_column("", dimension=3)]

    # learning_rate = 1.0
    # optimizer = AdagradOptimizer(learning_rate)
    #
    # learning_rate = 1.0
    # optimizer = AdadeltaOptimizer(learning_rate=learning_rate)

    # ~ 62.55%
    learning_rate = 0.01
    optimizer = AdamOptimizer(learning_rate, epsilon=0.1)

    # learning_rate = 0.05
    # optimizer = GradientDescentOptimizer(learning_rate)

    # learning_rate = 0.1
    # optimizer = RMSPropOptimizer(learning_rate, momentum=0.1)

    # learning_rate = 0.1
    # optimizer = FtrlOptimizer(learning_rate)

    return SKCompat(Estimator(
        model_fn=get_conv_model,
        params={
            'head': head_lib._multi_class_head(  # pylint: disable=protected-access
                n_classes,
                enable_centered_bias=False),
            'feature_columns': feature_columns,
            'activation_fn': tf.nn.relu,
            'learning_rate': learning_rate,
            'optimizer': optimizer
        },
        model_dir='saved_model'))
Developer: soswow | Project: Various-JS-and-Python | Lines of code: 35 | Source file: machine_learning.py

Example 5: testInvalidNumberOfClasses

  def testInvalidNumberOfClasses(self):
    """ValueError raised when the kernel mappers provided have invalid type."""

    feature = layers.real_valued_column('feature')
    with self.assertRaises(ValueError):
      _ = kernel_estimators.KernelLinearClassifier(
          feature_columns=[feature], n_classes=1)
Developer: 1000sprites | Project: tensorflow | Lines of code: 7 | Source file: kernel_estimators_test.py

Example 6: _add_bias_column

def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     targets, columns_to_variables):
    # TODO(b/31008490): Move definition to a common constants place.
    bias_column_name = "tf_virtual_bias_column"
    if any(col.name == bias_column_name for col in feature_columns):
        raise ValueError("%s is a reserved column name." % bias_column_name)
    bias_column = layers.real_valued_column(bias_column_name)
    columns_to_tensors[bias_column] = array_ops.ones_like(targets,
                                                          dtype=dtypes.float32)
    columns_to_variables[bias_column] = [bias_variable]
Developer: flyingbirdman | Project: tensorflow | Lines of code: 8 | Source file: linear.py

Example 7: get_wide_deep

def get_wide_deep():
    # define column types
    
    StyleName, quantity, demand, org_ret_price, sell_price, margin, off_orig_retail, total_ots = \
    [ \
    tflayers.sparse_column_with_hash_bucket('Style_Name', hash_bucket_size=1000),
    tflayers.real_valued_column('Quantity'),
    tflayers.real_valued_column('Demand'),
    tflayers.real_valued_column('Original_Retail_Price'),
    tflayers.real_valued_column('Selling_Price'),
    tflayers.real_valued_column('Margin'),
    tflayers.real_valued_column('off_Orig_Retail'),
    tflayers.real_valued_column('Total_OTS'),
    ]
    # Which columns are wide (sparse, with a linear relationship to the output)
    # and which are deep (with a complex relationship to the output)?
    wide = [StyleName, quantity, demand]
    deep = [\
               org_ret_price,
               sell_price,
               margin,
               off_orig_retail,
               total_ots,
               tflayers.embedding_column(StyleName, 3)
               ]
    return wide, deep
Developer: tpatil0412 | Project: Dynamic-Pricing | Lines of code: 25 | Source file: model.py

Example 8: get_features_ch8

def get_features_ch8():
    """Using the three inputs we originally used in Chapter 7, plus the time averages computed in Chapter 8"""
    real = {
      colname : tflayers.real_valued_column(colname) \
          for colname in \
            ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay').split(',')
    }
    sparse = {}
    return real, sparse
Developer: yogiadi | Project: data-science-on-gcp | Lines of code: 9 | Source file: model.py

Example 9: get_features_ch7

def get_features_ch7():
    """Using only the three inputs we originally used in Chapter 7"""
    real = {
      colname : tflayers.real_valued_column(colname) \
          for colname in \
            ('dep_delay,taxiout,distance').split(',')
    }
    sparse = {}
    return real, sparse
Developer: yogiadi | Project: data-science-on-gcp | Lines of code: 9 | Source file: model.py

Example 10: get_classifier

def get_classifier():
    # (kernel_size * kernel_size, 3)
    feature_columns = [layers.real_valued_column("", dimension=3)]
    return DNNClassifier(feature_columns=feature_columns,
                         hidden_units=[256, 128],
                         n_classes=5,
                         model_dir="saved_model",
                         # optimizer=AdadeltaOptimizer(learning_rate=0.1)
                         # optimizer=AdamOptimizer()
                         # dropout=0.5
                         )
Developer: soswow | Project: Various-JS-and-Python | Lines of code: 11 | Source file: machine_learning.py

Example 11: testInvalidKernelMapper

  def testInvalidKernelMapper(self):
    """ValueError raised when the kernel mappers provided have invalid type."""

    class DummyKernelMapper(object):

      def __init__(self):
        pass

    feature = layers.real_valued_column('feature')
    kernel_mappers = {feature: [DummyKernelMapper()]}
    with self.assertRaises(ValueError):
      _ = kernel_estimators.KernelLinearClassifier(
          feature_columns=[feature], kernel_mappers=kernel_mappers)
Developer: 1000sprites | Project: tensorflow | Lines of code: 13 | Source file: kernel_estimators_test.py

Example 12: get_features_raw

def get_features_raw():
    real = {
      colname : tflayers.real_valued_column(colname) \
          for colname in \
            ('dep_delay,taxiout,distance,avg_dep_delay,avg_arr_delay' + 
             ',dep_lat,dep_lon,arr_lat,arr_lon').split(',')
    }
    sparse = {
      'carrier': tflayers.sparse_column_with_keys('carrier',
                  keys='AS,VX,F9,UA,US,WN,HA,EV,MQ,DL,OO,B6,NK,AA'.split(',')),
      'origin' : tflayers.sparse_column_with_hash_bucket('origin', hash_bucket_size=1000), # FIXME
      'dest'   : tflayers.sparse_column_with_hash_bucket('dest', hash_bucket_size=1000) #FIXME
    }
    return real, sparse
Developer: yogiadi | Project: data-science-on-gcp | Lines of code: 14 | Source file: model.py

Example 13: testExtractFeaturesWithTransformation

  def testExtractFeaturesWithTransformation(self):
    """Tests feature extraction."""
    with self.test_session():
      features = {}
      features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
      features["sparse_float"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.float32),
          array_ops.zeros([2], dtypes.int64))
      features["sparse_categorical"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros(
              [2], dtypes.string), array_ops.zeros([2], dtypes.int64))
      feature_columns = set()
      feature_columns.add(layers.real_valued_column("dense_float"))
      feature_columns.add(
          layers.feature_column._real_valued_var_len_column(
              "sparse_float", is_sparse=True))
      feature_columns.add(
          feature_column_lib.sparse_column_with_hash_bucket(
              "sparse_categorical", hash_bucket_size=1000000))
      (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
       sparse_float_shapes, sparse_int_indices, sparse_int_values,
       sparse_int_shapes) = (gbdt_batch.extract_features(
           features, feature_columns))
      self.assertEqual(len(fc_names), 3)
      self.assertAllEqual(fc_names,
                          ["dense_float", "sparse_float", "sparse_categorical"])
      self.assertEqual(len(dense_floats), 1)
      self.assertEqual(len(sparse_float_indices), 1)
      self.assertEqual(len(sparse_float_values), 1)
      self.assertEqual(len(sparse_float_shapes), 1)
      self.assertEqual(len(sparse_int_indices), 1)
      self.assertEqual(len(sparse_int_values), 1)
      self.assertEqual(len(sparse_int_shapes), 1)
      self.assertAllEqual(dense_floats[0].eval(),
                          features["dense_float"].eval())
      self.assertAllEqual(sparse_float_indices[0].eval(),
                          features["sparse_float"].indices.eval())
      self.assertAllEqual(sparse_float_values[0].eval(),
                          features["sparse_float"].values.eval())
      self.assertAllEqual(sparse_float_shapes[0].eval(),
                          features["sparse_float"].dense_shape.eval())
      self.assertAllEqual(sparse_int_indices[0].eval(),
                          features["sparse_categorical"].indices.eval())
      self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
      self.assertAllEqual(sparse_int_shapes[0].eval(),
                          features["sparse_categorical"].dense_shape.eval())
Developer: chdinh | Project: tensorflow | Lines of code: 48 | Source file: gbdt_batch_test.py

Example 14: testClassifierWithAndWithoutKernelsNoRealValuedColumns

  def testClassifierWithAndWithoutKernelsNoRealValuedColumns(self):
    """Tests kernels have no effect for non-real valued columns ."""

    def input_fn():
      return {
          'price':
              constant_op.constant([[0.4], [0.6], [0.3]]),
          'country':
              sparse_tensor.SparseTensor(
                  values=['IT', 'US', 'GB'],
                  indices=[[0, 0], [1, 3], [2, 1]],
                  dense_shape=[3, 5]),
      }, constant_op.constant([[1], [0], [1]])

    price = layers.real_valued_column('price')
    country = layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)

    linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[price, country])
    linear_classifier.fit(input_fn=input_fn, steps=100)
    linear_metrics = linear_classifier.evaluate(input_fn=input_fn, steps=1)
    linear_loss = linear_metrics['loss']
    linear_accuracy = linear_metrics['accuracy']

    kernel_mappers = {
        country: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
    }

    kernel_linear_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[price, country], kernel_mappers=kernel_mappers)
    kernel_linear_classifier.fit(input_fn=input_fn, steps=100)
    kernel_linear_metrics = kernel_linear_classifier.evaluate(
        input_fn=input_fn, steps=1)
    kernel_linear_loss = kernel_linear_metrics['loss']
    kernel_linear_accuracy = kernel_linear_metrics['accuracy']

    # The kernel mapping is applied to a non-real-valued feature column and so
    # it should have no effect on the model. The loss and accuracy of the
    # "kernelized" model should match the loss and accuracy of the initial model
    # (without kernels).
    self.assertAlmostEqual(linear_loss, kernel_linear_loss, delta=0.01)
    self.assertAlmostEqual(linear_accuracy, kernel_linear_accuracy, delta=0.01)
Developer: 1000sprites | Project: tensorflow | Lines of code: 43 | Source file: kernel_estimators_test.py

Example 15: testLinearlyInseparableBinaryDataWithAndWithoutKernels

  def testLinearlyInseparableBinaryDataWithAndWithoutKernels(self):
    """Tests classifier w/ and w/o kernels on non-linearly-separable data."""
    multi_dim_feature = layers.real_valued_column(
        'multi_dim_feature', dimension=2)

    # Data points are non-linearly separable so there will be at least one
    # mis-classified sample (accuracy < 0.8). In fact, the loss is minimized for
    # w1=w2=0.0, in which case each example incurs a loss of ln(2). The overall
    # (average) loss should then be ln(2) and the logits should be approximately
    # 0.0 for each sample.
    logreg_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[multi_dim_feature])
    logreg_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    logreg_metrics = logreg_classifier.evaluate(
        input_fn=_linearly_inseparable_binary_input_fn, steps=1)
    logreg_loss = logreg_metrics['loss']
    logreg_accuracy = logreg_metrics['accuracy']
    logreg_predictions = logreg_classifier.predict(
        input_fn=_linearly_inseparable_binary_input_fn, as_iterable=False)
    self.assertAlmostEqual(logreg_loss, np.log(2), places=3)
    self.assertLess(logreg_accuracy, 0.8)
    self.assertAllClose(logreg_predictions['logits'], [[0.0], [0.0], [0.0],
                                                       [0.0]])

    # Using kernel mappers allows the classifier to discover non-linearities in
    # the data. Mapping the data to a higher-dimensional feature space via
    # approximate RBF kernels substantially reduces the loss and leads to
    # perfect classification accuracy.
    kernel_mappers = {
        multi_dim_feature: [RandomFourierFeatureMapper(2, 30, 0.6, 1, 'rffm')]
    }
    kernelized_logreg_classifier = kernel_estimators.KernelLinearClassifier(
        feature_columns=[], kernel_mappers=kernel_mappers)
    kernelized_logreg_classifier.fit(
        input_fn=_linearly_inseparable_binary_input_fn, steps=50)
    kernelized_logreg_metrics = kernelized_logreg_classifier.evaluate(
        input_fn=_linearly_inseparable_binary_input_fn, steps=1)
    kernelized_logreg_loss = kernelized_logreg_metrics['loss']
    kernelized_logreg_accuracy = kernelized_logreg_metrics['accuracy']
    self.assertLess(kernelized_logreg_loss, 0.2)
    self.assertEqual(kernelized_logreg_accuracy, 1.0)
Developer: 1000sprites | Project: tensorflow | Lines of code: 42 | Source file: kernel_estimators_test.py


Note: The tensorflow.contrib.layers.real_valued_column examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.