

Python SdcaModel.approximate_duality_gap Method Code Examples

This page collects typical usage examples of the Python method tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel.approximate_duality_gap. If you are wondering what SdcaModel.approximate_duality_gap does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the containing class, tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel.


Fifteen code examples of the SdcaModel.approximate_duality_gap method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
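Before the project examples, here is a minimal sketch of the usual pattern: build an SdcaModel, run SDCA steps, and read approximate_duality_gap() as a convergence check. This sketch is not taken from any of the projects below; it assumes a TF 1.x build that still ships tf.contrib and that the test helpers make_example_proto, make_example_dict, and make_variable_dict used throughout the snippets below are in scope (some older contrib versions also require a leading container argument in the SdcaModel constructor, as several examples show).

import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel

# Two toy examples, built with the helpers from sdca_ops_test.py
# (make_example_proto / make_example_dict / make_variable_dict are assumed in scope).
example_protos = [
    make_example_proto({'age': [0], 'gender': [0]}, 0),
    make_example_proto({'age': [1], 'gender': [1]}, 1),
]
example_weights = [1.0, 1.0]

with tf.Session():
  examples = make_example_dict(example_protos, example_weights)
  variables = make_variable_dict(1, 1)
  options = dict(symmetric_l2_regularization=1,
                 symmetric_l1_regularization=0,
                 loss_type='logistic_loss')

  # Older contrib releases take a leading container argument, e.g.
  # SdcaModel(CONTAINER, examples, variables, options).
  lr = SdcaModel(examples, variables, options)
  tf.global_variables_initializer().run()
  train_op = lr.minimize()

  # Run SDCA steps until the approximate duality gap certifies that the
  # regularized loss is within the requested tolerance of the optimum.
  for _ in range(100):
    train_op.run()
    if lr.approximate_duality_gap().eval() < 1e-2:
      break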

Example 1: testNoWeightedExamples

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testNoWeightedExamples(self):
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    # Zeroed out example weights.
    example_weights = [0.0, 0.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
      lr.minimize().run()
      self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
      with self.assertRaisesOpError(
          'No examples found or all examples have zero weight.'):
        lr.approximate_duality_gap().eval()
Developer: CPostelnicu, Project: tensorflow, Lines: 29, Source: sdca_ops_test.py

Example 2: testImbalanced

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testImbalanced(self):
        # Setup test data with 1 positive, and 3 negative examples.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [2], "gender": [0]}, 0),
            make_example_proto({"age": [3], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(3, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
            self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Developer: cartland, Project: tensorflow, Lines: 30, Source: sdca_ops_test.py

Example 3: testSomeUnweightedExamples

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testSomeUnweightedExamples(self):
        # Setup test data with 4 examples, but should produce the same
        # results as testSimple.
        example_protos = [
            # Will be used.
            make_example_proto({"age": [0], "gender": [0]}, 0),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 0),
            # Will be used.
            make_example_proto({"age": [1], "gender": [1]}, 1),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 1),
        ]
        example_weights = [1.0, 0.0, 1.0, 0.0]
        with self._single_threaded_test_session():
            # Only use examples 0 and 2
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Developer: cartland, Project: tensorflow, Lines: 36, Source: sdca_ops_test.py

Example 4: testSimple

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      for _ in xrange(20):
        lr.minimize().run()

      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                          predictions.eval(),
                          rtol=0.005)
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Developer: CPostelnicu, Project: tensorflow, Lines: 36, Source: sdca_ops_test.py

Example 5: testSimple

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 2/3 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
            self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
            # Approximate gap should be very close to 0.0. (In fact, because the gap
            # is only approximate, it is likely that upon convergence the duality gap
            # can have a tiny negative value).
            self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
Developer: apollos, Project: tensorflow, Lines: 28, Source: sdca_ops_test.py

Example 6: testSimple

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()
            # The high tolerance in unregularized_loss comparisons is due to the
            # fact that it's possible to trade off unregularized_loss vs.
            # regularization and still have a sum that is quite close to the
            # optimal regularized_loss value.  SDCA's duality gap only ensures that
            # the regularized_loss is within 0.01 of optimal.
            # 0.525457 is the optimal regularized_loss.
            # 0.411608 is the unregularized_loss at that optimum.
            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Developer: cartland, Project: tensorflow, Lines: 36, Source: sdca_ops_test.py

Example 7: testImbalancedWithExampleWeights

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Developer: apollos, Project: tensorflow, Lines: 34, Source: sdca_ops_test.py

Example 8: testInstancesOfOneClassOnly

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Developer: apollos, Project: tensorflow, Lines: 33, Source: sdca_ops_test.py

Example 9: testSimpleNoL2

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
    def testSimpleNoL2(self):
        # Same as test above (so comments from above apply) but without an L2.
        # The algorithm should behave as if we have an L2 of 1 in optimization but
        # 0 in regularized_loss.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=0, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            # There is neither L1 nor L2 loss, so regularized and unregularized losses
            # should be exactly the same.
            self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
            self.assertAllClose(0.40244, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Developer: cartland, Project: tensorflow, Lines: 34, Source: sdca_ops_test.py

Example 10: testSparseRandom

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testSparseRandom(self):
    dim = 20
    num_examples = 1000
    # Number of non-zero features per example.
    non_zeros = 10
    # Setup test data.
    with self._single_threaded_test_session():
      examples, variables = make_random_examples_and_variables_dicts(
          num_examples, dim, non_zeros)
      options = dict(
          symmetric_l2_regularization=.1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          adaptive=False,
          loss_type='logistic_loss')

      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      for _ in range(4):
        train_op.run()
      lr.update_weights(train_op).run()
      # Duality gap is 1.4e-5.
      # It would be 0.01 without shuffling and 0.02 with adaptive sampling.
      self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
Developer: AnishShah, Project: tensorflow, Lines: 27, Source: sdca_ops_test.py

Example 11: testInstancesOfOneClassOnly

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testInstancesOfOneClassOnly(self):
    # Setup test data with 1 positive (ignored), and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [0]}, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
      self.assertAllClose(0.525457, loss.eval(), atol=0.01)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 0], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Developer: CPostelnicu, Project: tensorflow, Lines: 35, Source: sdca_ops_test.py

Example 12: testImbalancedWithExampleWeights

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testImbalancedWithExampleWeights(self):
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [3.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.284860, unregularized_loss.eval(), rtol=0.08)
      self.assertAllClose(0.408044, loss.eval(), atol=0.012)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 1], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Developer: CPostelnicu, Project: tensorflow, Lines: 35, Source: sdca_ops_test.py

Example 13: testDistributedSimple

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testDistributedSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          examples = make_example_dict(example_protos, example_weights)
          variables = make_variable_dict(1, 1)
          options = dict(
              symmetric_l2_regularization=1,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          tf.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def Minimize():
            with self._single_threaded_test_session():
              for _ in range(_MAX_ITERATIONS):
                train_op.run()

          threads = []
          for _ in range(num_loss_partitions):
            threads.append(Thread(target=Minimize))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run()

          # The high tolerance in unregularized_loss comparisons is due to the
          # fact that it's possible to trade off unregularized_loss vs.
          # regularization and still have a sum that is quite close to the
          # optimal regularized_loss value.  SDCA's duality gap only ensures
          # that the regularized_loss is within 0.01 of optimal.
          # 0.525457 is the optimal regularized_loss.
          # 0.411608 is the unregularized_loss at that optimum.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
          self.assertAllClose(0.525457, loss.eval(), atol=0.01)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
Developer: curtiszimmerman, Project: tensorflow, Lines: 59, Source: sdca_ops_test.py

Example 14: testImbalanced

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testImbalanced(self):
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [2],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [3],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(3, 1)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()

        self.assertAllClose(
            0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Developer: Immexxx, Project: tensorflow, Lines: 50, Source: sdca_ops_test.py

Example 15: testSimple

# Required imports: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import approximate_duality_gap [as alias]
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 2),
    ]
    example_weights = [100.0, 100.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='poisson_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 1 for each example.
      predictions = model.predictions(examples)
      self.assertAllClose([1.0, 1.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      approximate_duality_gap = model.approximate_duality_gap()
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to:
      # w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
      # w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
      # This gives an unregularized loss of .3167 and .3366 with regularization.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
      self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
Developer: AnishShah, Project: tensorflow, Lines: 49, Source: sdca_ops_test.py


Note: The tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel.approximate_duality_gap examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.