This page collects typical usage examples of the Python method tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel.minimize. If you are wondering what SdcaModel.minimize does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples of its containing class, tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel.
The 15 code examples of SdcaModel.minimize shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
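Before the individual examples, here is a minimal sketch of the pattern they all share. It is distilled from the examples below rather than taken verbatim from any one of them; the helpers make_example_dict and make_variable_dict and the CONTAINER constant are test utilities defined in the surrounding TensorFlow test file (not part of the public SdcaModel API) and are assumed to be available here.

import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel

with tf.Session():  # installs a default session so .run()/.eval() work below
  # Assumed test helpers: they build the examples/variables dicts that
  # SdcaModel expects (sparse features, labels, per-example weights, and the
  # per-feature weight variables).
  examples = make_example_dict(example_protos, example_weights)
  variables = make_variable_dict(1, 1)
  options = dict(symmetric_l2_regularization=1.0,
                 symmetric_l1_regularization=0.0,
                 loss_type='logistic_loss')

  model = SdcaModel(CONTAINER, examples, variables, options)
  tf.initialize_all_variables().run()

  # Each minimize() call runs one SDCA pass over the examples; repeat until
  # the approximate duality gap is small enough.
  for _ in range(10):
    model.minimize().run()

  print(model.regularized_loss(examples).eval())
  print(model.approximate_duality_gap().eval())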
Example 1: testInstancesOfOneClassOnly
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testInstancesOfOneClassOnly(self):
  # Setup test data with 1 positive (ignored), and 1 negative example.
  example_protos = [
      make_example_proto(
          {'age': [0],
           'gender': [0]}, 0),
      make_example_proto(
          {'age': [1],
           'gender': [0]}, 1),  # Shares gender with the instance above.
  ]
  example_weights = [1.0, 0.0]  # Second example "omitted" from training.
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=0.25,
                   symmetric_l1_regularization=0,
                   loss_type='logistic_loss')
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    prediction = lr.predictions(examples)
    lr.minimize().run()
    self.assertAllClose(0.395226,
                        unregularized_loss.eval(),
                        rtol=3e-2,
                        atol=3e-2)
    self.assertAllClose(0.460781, loss.eval(), rtol=3e-2, atol=3e-2)
    predicted_labels = tf.cast(
        tf.greater_equal(prediction,
                         tf.ones_like(prediction) * 0.5), tf.float32)
    self.assertAllEqual([0, 0], predicted_labels.eval())
Example 2: testDenseFeatures
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testDenseFeatures(self):
  with self._single_threaded_test_session():
    examples = make_dense_examples_dict(
        dense_feature_values=[[-2.0, 0.0], [0.0, 2.0]],
        weights=[1.0, 1.0],
        labels=[-10.0, 14.0])
    variables = make_dense_variable_dict(2, 2)
    options = dict(symmetric_l2_regularization=1,
                   symmetric_l1_regularization=0,
                   loss_type='squared_loss')
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    predictions = lr.predictions(examples)
    for _ in xrange(20):
      lr.minimize().run()
    # Predictions should be 4/5 of label due to minimizing regularized loss:
    # (label - 2 * weight)^2 / 2 + L2 * weight^2
    self.assertAllClose([-10.0 * 4 / 5, 14.0 * 4 / 5],
                        predictions.eval(),
                        rtol=0.01)
    loss = lr.regularized_loss(examples)
    self.assertAllClose(148.0 / 10.0, loss.eval(), atol=0.01)
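A quick check of the 4/5 claim in the comment above (an illustrative sketch, not part of the original test). Assuming the objective being minimized is the sum over examples of (label - x * w)^2 / 2 plus symmetric_l2_regularization / 2 times the squared weight norm, and that regularized_loss reports that total divided by the number of examples, the per-feature closed form reproduces both asserted values:

import numpy as np

l2 = 1.0
x = np.array([-2.0, 2.0])           # each example touches a single dense feature
labels = np.array([-10.0, 14.0])

# Closed form for min_w (label - x * w)^2 / 2 + (l2 / 2) * w^2, solved per feature.
w = x * labels / (x ** 2 + l2)                        # [4.0, 5.6]
predictions = x * w                                   # [-8.0, 11.2] == 4/5 of each label

total = np.sum((labels - predictions) ** 2 / 2.0) + 0.5 * l2 * np.sum(w ** 2)
print(predictions, total / len(labels))               # 14.8 == 148.0 / 10.0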
Example 3: testDenseFeaturesPerfectlySeparable
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testDenseFeaturesPerfectlySeparable(self):
  with self._single_threaded_test_session():
    examples = make_dense_examples_dict(
        dense_feature_values=[[1.0, 1.0], [1.0, -1.0]],
        weights=[1.0, 1.0],
        labels=[1.0, 0.0])
    variables = make_dense_variable_dict(2, 2)
    options = dict(symmetric_l2_regularization=1.0,
                   symmetric_l1_regularization=0,
                   loss_type='hinge_loss')
    model = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    predictions = model.predictions(examples)
    binary_predictions = get_binary_predictions_for_hinge(predictions)
    for _ in xrange(5):
      model.minimize().run()
    self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
    self.assertAllClose([1.0, 0.0], binary_predictions.eval())
    # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
    # the SVM's functional margin >=1), so the unregularized loss is ~0.0.
    # There is only loss due to l2-regularization. For these datapoints, it
    # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
    unregularized_loss = model.unregularized_loss(examples)
    regularized_loss = model.regularized_loss(examples)
    self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
    self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
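The 0.25 figure in that comment can be reproduced by hand (an illustrative sketch, not part of the test). Assuming the hinge loss uses labels mapped to {-1, +1} and that regularized_loss reports the sum of per-example hinge losses plus symmetric_l2_regularization / 2 times the squared weight norm, divided by the number of examples, the weights w ~= (0, 1) quoted in the comment give exactly the asserted values:

import numpy as np

x = np.array([[1.0, 1.0], [1.0, -1.0]])
y = np.array([1.0, -1.0])            # hinge convention: labels mapped to {-1, +1}
w = np.array([0.0, 1.0])             # approximate solution quoted in the comment
l2 = 1.0

hinge = np.maximum(0.0, 1.0 - y * x.dot(w))                   # [0.0, 0.0]
unregularized = hinge.mean()                                  # ~0.0
regularized = (hinge.sum() + 0.5 * l2 * w.dot(w)) / len(y)    # ~0.25
print(unregularized, regularized)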
Example 4: testSimple
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testSimple(self):
  # Setup test data
  example_protos = [
      make_example_proto(
          {'age': [0],
           'gender': [0]}, -10.0),
      make_example_proto(
          {'age': [1],
           'gender': [1]}, 14.0),
  ]
  example_weights = [1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=1,
                   symmetric_l1_regularization=0,
                   loss_type='squared_loss')
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    predictions = lr.predictions(examples)
    for _ in xrange(20):
      lr.minimize().run()
    # Predictions should be 2/3 of label due to minimizing regularized loss:
    # (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
    self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                        predictions.eval(),
                        rtol=0.005)
    self.assertAllClose(0.01,
                        lr.approximate_duality_gap().eval(),
                        rtol=1e-2,
                        atol=1e-2)
Example 5: testL1Regularization
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testL1Regularization(self):
  # Setup test data
  example_protos = [
      make_example_proto(
          {'age': [0],
           'gender': [0]}, -10.0),
      make_example_proto(
          {'age': [1],
           'gender': [1]}, 14.0),
  ]
  example_weights = [1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=1.0,
                   symmetric_l1_regularization=4.0,
                   loss_type='squared_loss')
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    prediction = lr.predictions(examples)
    loss = lr.regularized_loss(examples)
    for _ in xrange(5):
      lr.minimize().run()
    # Predictions should be -4.0 and 20/3 due to minimizing regularized loss:
    # (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
    self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
    # Loss should be the sum of the regularized loss value from above per
    # example after plugging in the optimal weights.
    self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
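Those two constants can also be reproduced by hand (an illustrative sketch, not part of the test). Assuming each example activates two exclusive features sharing a common optimal weight w, that the per-example objective is (label - 2w)^2 / 2 plus (L2/2) * 2w^2 plus L1 * 2|w|, and that regularized_loss reports the summed objective divided by the number of examples, a soft-thresholded closed form gives the asserted predictions and the 308/6 loss:

import numpy as np

l1, l2 = 4.0, 1.0
labels = np.array([-10.0, 14.0])

# Soft-thresholded closed form for min_w (y - 2w)^2 / 2 + l2 * w^2 + 2 * l1 * |w|
# (the factors of 2 come from each example owning two exclusive features of weight w).
w = np.sign(labels) * np.maximum(np.abs(labels) - l1, 0.0) / (2.0 + l2)   # [-2.0, 10/3]
predictions = 2.0 * w                                                      # [-4.0, 20/3]

per_example = (labels - predictions) ** 2 / 2.0 + l2 * w ** 2 + 2.0 * l1 * np.abs(w)
print(predictions, per_example.sum() / len(labels))    # second value ~= 308/6 ~= 51.33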
Example 6: testNoWeightedExamples
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testNoWeightedExamples(self):
  # Setup test data with 1 positive, and 1 negative example.
  example_protos = [
      make_example_proto(
          {'age': [0],
           'gender': [0]}, 0),
      make_example_proto(
          {'age': [1],
           'gender': [1]}, 1),
  ]
  # Zeroed out example weights.
  example_weights = [0.0, 0.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=0.5,
                   symmetric_l1_regularization=0,
                   loss_type='logistic_loss')
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
    with self.assertRaisesOpError(
        'No weighted examples in 2 training examples'):
      lr.minimize().run()
    self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
Example 7: testLinearFeatureValues
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testLinearFeatureValues(self):
  # Setup test data
  example_protos = [
      make_example_proto(
          {'age': [0],
           'gender': [0]}, -10.0, -2.0),
      make_example_proto(
          {'age': [1],
           'gender': [1]}, 14.0, 2.0),
  ]
  example_weights = [1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=0.5,
                   symmetric_l1_regularization=0,
                   loss_type='squared_loss',
                   prior=0.0)
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    prediction = lr.predictions(examples)
    lr.minimize().run()
    # Predictions should be 8/9 of label due to minimizing regularized loss:
    # (label - 2 * 2 * weight)^2 / 2 + L2 * 2 * weight^2
    self.assertAllClose([-10.0 * 8 / 9, 14.0 * 8 / 9],
                        prediction.eval(),
                        rtol=0.07)
Example 8: testImbalanced
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testImbalanced(self):
  # Setup test data with 1 positive, and 3 negative examples.
  example_protos = [
      make_example_proto({"age": [0], "gender": [0]}, 0),
      make_example_proto({"age": [2], "gender": [0]}, 0),
      make_example_proto({"age": [3], "gender": [0]}, 0),
      make_example_proto({"age": [1], "gender": [1]}, 1),
  ]
  example_weights = [1.0, 1.0, 1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(3, 1)
    options = dict(
        symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss", prior=-1.09861
    )
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    prediction = lr.predictions(examples)
    lr.minimize().run()
    self.assertAllClose(0.331710, unregularized_loss.eval(), rtol=3e-2, atol=3e-2)
    self.assertAllClose(0.591295, loss.eval(), rtol=3e-2, atol=3e-2)
    predicted_labels = tf.cast(tf.greater_equal(prediction, tf.ones_like(prediction) * 0.5), tf.float32)
    self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
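A side note on the prior=-1.09861 option above (an observation, not something stated in the test): the value matches the log-odds of the positive class in this 1-positive / 3-negative dataset, which is presumably how the prior is meant to be chosen:

import math

positives, negatives = 1, 3
p = positives / float(positives + negatives)   # 0.25
print(math.log(p / (1.0 - p)))                 # -1.0986122886681098 ~= -1.09861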
Example 9: testLinearRegularization
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testLinearRegularization(self):
  # Setup test data
  example_protos = [
      # 2 identical examples
      make_example_proto({"age": [0], "gender": [0]}, -10.0),
      make_example_proto({"age": [0], "gender": [0]}, -10.0),
      # 2 more identical examples
      make_example_proto({"age": [1], "gender": [1]}, 14.0),
      make_example_proto({"age": [1], "gender": [1]}, 14.0),
  ]
  example_weights = [1.0, 1.0, 1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(
        symmetric_l2_regularization=16, symmetric_l1_regularization=0, loss_type="squared_loss", prior=0.0
    )
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    prediction = lr.predictions(examples)
    lr.minimize().run()
    # Predictions should be 1/5 of label due to minimizing regularized loss:
    # (label - 2 * weight)^2 + L2 * 16 * weight^2
    optimal1 = -10.0 / 5.0
    optimal2 = 14.0 / 5.0
    self.assertAllClose([optimal1, optimal1, optimal2, optimal2], prediction.eval(), rtol=0.01)
Example 10: testImbalanced
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testImbalanced(self):
  # Setup test data with 1 positive, and 3 negative examples.
  example_protos = [
      make_example_proto({"age": [0], "gender": [0]}, 0),
      make_example_proto({"age": [2], "gender": [0]}, 0),
      make_example_proto({"age": [3], "gender": [0]}, 0),
      make_example_proto({"age": [1], "gender": [1]}, 1),
  ]
  example_weights = [1.0, 1.0, 1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(3, 1)
    options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    predictions = lr.predictions(examples)
    for _ in xrange(5):
      lr.minimize().run()
    self.assertAllClose(0.226487 + 0.102902, unregularized_loss.eval(), rtol=0.08)
    self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
    predicted_labels = get_binary_predictions_for_logistic(predictions)
    self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
    self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example 11: testSomeUnweightedExamples
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testSomeUnweightedExamples(self):
  # Setup test data with 4 examples, but should produce the same
  # results as testSimple.
  example_protos = [
      # Will be used.
      make_example_proto({"age": [0], "gender": [0]}, 0),
      # Will be ignored.
      make_example_proto({"age": [1], "gender": [0]}, 0),
      # Will be used.
      make_example_proto({"age": [1], "gender": [1]}, 1),
      # Will be ignored.
      make_example_proto({"age": [1], "gender": [0]}, 1),
  ]
  example_weights = [1.0, 0.0, 1.0, 0.0]
  with self._single_threaded_test_session():
    # Only use examples 0 and 2
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")
    tf.initialize_all_variables().run()
    lr = SdcaModel(CONTAINER, examples, variables, options)
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    prediction = lr.predictions(examples)
    lr.minimize().run()
    self.assertAllClose(0.395226, unregularized_loss.eval(), rtol=3e-2, atol=3e-2)
    self.assertAllClose(0.657446, loss.eval(), rtol=3e-2, atol=3e-2)
    predicted_labels = tf.cast(tf.greater_equal(prediction, tf.ones_like(prediction) * 0.5), tf.float32)
    self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
Example 12: testSomeUnweightedExamples
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testSomeUnweightedExamples(self):
  # Setup test data with 4 examples, but should produce the same
  # results as testSimple.
  example_protos = [
      # Will be used.
      make_example_proto({"age": [0], "gender": [0]}, 0),
      # Will be ignored.
      make_example_proto({"age": [1], "gender": [0]}, 0),
      # Will be used.
      make_example_proto({"age": [1], "gender": [1]}, 1),
      # Will be ignored.
      make_example_proto({"age": [1], "gender": [0]}, 1),
  ]
  example_weights = [1.0, 0.0, 1.0, 0.0]
  with self._single_threaded_test_session():
    # Only use examples 0 and 2
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    predictions = lr.predictions(examples)
    for _ in xrange(5):
      lr.minimize().run()
    self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
    self.assertAllClose(0.525457, loss.eval(), atol=0.01)
    predicted_labels = get_binary_predictions_for_logistic(predictions)
    self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
    self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example 13: testSimpleNoL2
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testSimpleNoL2(self):
  # Same as test above (so comments from above apply) but without an L2.
  # The algorithm should behave as if we have an L2 of 1 in optimization but
  # 0 in regularized_loss.
  example_protos = [
      make_example_proto({"age": [0], "gender": [0]}, 0),
      make_example_proto({"age": [1], "gender": [1]}, 1),
  ]
  example_weights = [1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=0, symmetric_l1_regularization=0, loss_type="logistic_loss")
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    predictions = lr.predictions(examples)
    self.assertAllClose(0.693147, unregularized_loss.eval())
    self.assertAllClose(0.693147, loss.eval())
    for _ in xrange(5):
      lr.minimize().run()
    self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
    self.assertAllClose(0.371705, loss.eval(), atol=0.01)
    predicted_labels = get_binary_predictions_for_logistic(predictions)
    self.assertAllEqual([0, 1], predicted_labels.eval())
    self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example 14: testSimple
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testSimple(self):
  # Setup test data
  example_protos = [
      make_example_proto({"age": [0], "gender": [0]}, 0),
      make_example_proto({"age": [1], "gender": [1]}, 1),
  ]
  example_weights = [1.0, 1.0]
  with self._single_threaded_test_session():
    examples = make_example_dict(example_protos, example_weights)
    variables = make_variable_dict(1, 1)
    options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")
    lr = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    unregularized_loss = lr.unregularized_loss(examples)
    loss = lr.regularized_loss(examples)
    predictions = lr.predictions(examples)
    self.assertAllClose(0.693147, unregularized_loss.eval())
    self.assertAllClose(0.693147, loss.eval())
    for _ in xrange(5):
      lr.minimize().run()
    # The high tolerance in unregularized_loss comparisons is due to the
    # fact that it's possible to trade off unregularized_loss vs.
    # regularization and still have a sum that is quite close to the
    # optimal regularized_loss value. SDCA's duality gap only ensures that
    # the regularized_loss is within 0.01 of optimal.
    # 0.525457 is the optimal regularized_loss.
    # 0.411608 is the unregularized_loss at that optimum.
    self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
    self.assertAllClose(0.525457, loss.eval(), atol=0.01)
    predicted_labels = get_binary_predictions_for_logistic(predictions)
    self.assertAllEqual([0, 1], predicted_labels.eval())
    self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
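The constants 0.525457 and 0.411608 quoted in that comment can be reproduced numerically (an illustrative sketch, not part of the test). Assuming the objective is the sum of per-example logistic losses plus symmetric_l2_regularization / 2 times the squared weight norm, that regularized_loss reports this total divided by the number of examples, and that by symmetry the four active feature weights share a single magnitude b, a one-dimensional search recovers both values:

import numpy as np

l2 = 1.0
b = np.linspace(0.0, 1.0, 200001)      # shared magnitude of the four active feature weights

# Each example activates two exclusive features, so its logit is +/- 2b and its
# logistic loss is log(1 + exp(-2b)); ||w||^2 = 4 * b^2 over the four weights.
total = 2.0 * np.log1p(np.exp(-2.0 * b)) + 0.5 * l2 * 4.0 * b ** 2
i = np.argmin(total)

print(total[i] / 2.0)                   # ~0.525457, the optimal regularized_loss
print(np.log1p(np.exp(-2.0 * b[i])))    # ~0.411608, unregularized_loss at that optimum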
Example 15: testDenseFeaturesWeightedExamples
# Required import: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel [as alias]
# Or: from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel import minimize [as alias]
def testDenseFeaturesWeightedExamples(self):
  with self._single_threaded_test_session():
    examples = make_dense_examples_dict(
        dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
        weights=[3.0, 1.0],
        labels=[1.0, 0.0])
    variables = make_dense_variable_dict(2, 2)
    options = dict(symmetric_l2_regularization=1.0,
                   symmetric_l1_regularization=0,
                   loss_type='hinge_loss')
    model = SdcaModel(CONTAINER, examples, variables, options)
    tf.initialize_all_variables().run()
    predictions = model.predictions(examples)
    binary_predictions = get_binary_predictions_for_hinge(predictions)
    for _ in xrange(5):
      model.minimize().run()
    # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
    # try to increase the margin from (1.0, 0.5). Due to regularization,
    # (1.0, -0.5) will be within the margin. For these points and example
    # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
    # loss of 0.5 * 0.25 * 1.6 = 0.2. The binary predictions will be
    # correct, but the boundary will be much closer to the 2nd point than the
    # first one.
    self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
    self.assertAllClose([1.0, 0.0], binary_predictions.eval(), atol=0.05)
    unregularized_loss = model.unregularized_loss(examples)
    regularized_loss = model.regularized_loss(examples)
    self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
    self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
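The numbers in that comment can be checked directly (an illustrative sketch, not part of the test). Assuming the hinge loss uses labels mapped to {-1, +1}, that per-example losses are weighted by the example weights and normalized by their sum, and that the same normalizer is applied to the (l2/2) * ||w||^2 term, the quoted weights w ~= (0.4, 1.2) reproduce all four asserted values:

import numpy as np

x = np.array([[1.0, 0.5], [1.0, -0.5]])    # example points, one row per example
y = np.array([1.0, -1.0])                  # hinge convention: labels mapped to {-1, +1}
ex_weights = np.array([3.0, 1.0])
w = np.array([0.4, 1.2])                   # approximate optimum quoted in the comment
l2 = 1.0

predictions = x.dot(w)                                          # [1.0, -0.2]
hinge = np.maximum(0.0, 1.0 - y * predictions)                  # [0.0, 0.8]
normalizer = ex_weights.sum()                                   # 4.0
unregularized = (ex_weights * hinge).sum() / normalizer         # 0.2
regularized = unregularized + 0.5 * l2 * w.dot(w) / normalizer  # 0.2 + 0.2 = 0.4
print(predictions, unregularized, regularized)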