This article collects typical usage examples of the Python function tensorflow.python.ops.nn_impl._compute_sampled_logits. If you have been wondering what _compute_sampled_logits does, how to call it, or what real uses of it look like, the curated code examples below should help.
Six code examples of _compute_sampled_logits are shown below, sorted by popularity by default.
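Before the examples, here is a minimal sketch of what a bare call to _compute_sampled_logits looks like and what it returns. This is not taken from the examples below: the shapes and values are made up for illustration, and a TensorFlow 1.x graph/session environment is assumed. _compute_sampled_logits is a private helper (it backs tf.nn.nce_loss and tf.nn.sampled_softmax_loss), so its exact signature can vary between TensorFlow versions.

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_impl

# Illustrative sizes: 4 classes, 3-dim embeddings, batch of 2, 2 negatives.
num_classes, dim, batch_size, num_sampled = 4, 3, 2, 2
weights = tf.constant(np.random.randn(num_classes, dim), dtype=tf.float32)
biases = tf.constant(np.zeros(num_classes), dtype=tf.float32)
inputs = tf.constant(np.random.randn(batch_size, dim), dtype=tf.float32)
labels = tf.constant([[0], [3]], dtype=tf.int64)  # one true class per example

# Returns logits of shape [batch_size, num_true + num_sampled] (true classes
# first, then the sampled negatives) plus matching target labels.
out_logits, out_labels = nn_impl._compute_sampled_logits(
    weights=weights,
    biases=biases,
    labels=labels,
    inputs=inputs,
    num_sampled=num_sampled,
    num_classes=num_classes,
    num_true=1,
    subtract_log_q=True,
    remove_accidental_hits=False,
    partition_strategy="div")

with tf.Session() as sess:
  print(sess.run([out_logits, out_labels]))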
Example 1: testSubtractLogQ
def testSubtractLogQ(self):
  """With subtract_log_q, no accidental hit removal."""
  np.random.seed(0)
  num_classes = 5
  batch_size = 3
  with self.test_session() as sess:
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=True)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=True,
          remove_accidental_hits=False,
          partition_strategy="div",
          name="sampled_logits_subtract_log_q_num_true_%d" % num_true)
      got_logits, got_labels = sess.run([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)
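For context on the subtract_log_q flag exercised above: when it is True, _compute_sampled_logits subtracts the log of each candidate's expected count under the sampler from the corresponding logit, the correction that makes sampled softmax training consistent with the full softmax (see the candidate sampling reference linked in Example 6). A rough NumPy illustration of that step, using made-up numbers rather than values from the test, might be:

import numpy as np

# One row of raw logits for the [true, sampled, sampled] columns, and the
# sampler's expected counts for the same columns (true_expected_count and
# sampled_expected_count in TensorFlow's terminology). All values are made up.
raw_logits = np.array([[2.0, 0.5, -1.0]])
expected_counts = np.array([[0.8, 0.4, 0.1]])

# The subtract_log_q correction applied inside _compute_sampled_logits.
adjusted_logits = raw_logits - np.log(expected_counts)
print(adjusted_logits)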
Example 2: testShapes
def testShapes(self):
  np.random.seed(0)
  num_classes = 5
  batch_size = 3
  for num_true in range(1, 5):
    labels = np.random.randint(
        low=0, high=num_classes, size=batch_size * num_true)
    (weights, biases, hidden_acts, sampled_vals, exp_logits,
     exp_labels) = self._GenerateTestData(
         num_classes=num_classes,
         dim=10,
         batch_size=batch_size,
         num_true=num_true,
         labels=labels,
         sampled=[1, 0, 2, 3],
         subtract_log_q=False)
    logits_tensor, labels_tensor = _compute_sampled_logits(
        weights=constant_op.constant(weights),
        biases=constant_op.constant(biases),
        labels=constant_op.constant(
            labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
        inputs=constant_op.constant(hidden_acts),
        num_sampled=4,
        num_classes=num_classes,
        num_true=num_true,
        sampled_values=sampled_vals,
        subtract_log_q=False,
        remove_accidental_hits=False,
        partition_strategy="div",
        name="sampled_logits_basic_num_true_%d" % num_true)
    got_logits, got_labels = self.evaluate([logits_tensor, labels_tensor])
    self.assertEqual(exp_logits.shape, got_logits.shape, self._eps)
    self.assertEqual(exp_labels.shape, got_labels.shape, self._eps)
Example 3: _ComputeSampledLogitsTF
def _ComputeSampledLogitsTF(self, weights, biases, hidden_acts, labels,
                            num_sampled, num_classes, num_true, sampled_vals,
                            subtract_log_q, remove_accidental_hits,
                            name="sampled_loss_TF"):
  # Should be called from within a `with test_session():` block.
  # Note: the positional call to _compute_sampled_logits below passes
  # hidden_acts (the inputs) before labels, matching the older signature of
  # that helper; more recent TensorFlow versions take labels before inputs.
  if isinstance(weights, list):
    weights_tf = [tf.constant(shard) for shard in weights]
  else:
    weights_tf = tf.constant(weights)
  biases_tf = tf.constant(biases)
  hidden_acts_tf = tf.constant(hidden_acts,
                               shape=(self._batch_size, self._dim))
  labels_tf = tf.constant(labels,
                          dtype=tf.int64,
                          shape=(self._batch_size, num_true))
  pred_logits_tf, pred_labels_tf = _compute_sampled_logits(
      weights_tf,
      biases_tf,
      hidden_acts_tf,
      labels_tf,
      num_sampled,
      num_classes,
      num_true,
      sampled_vals,
      subtract_log_q=subtract_log_q,
      remove_accidental_hits=remove_accidental_hits,
      name=name)
  return pred_logits_tf, pred_labels_tf
Example 4: testAccidentalHitRemoval
def testAccidentalHitRemoval(self):
  """With accidental hit removal, no subtract_log_q."""
  np.random.seed(0)
  num_classes = 5
  batch_size = 3
  sampled = [1, 0, 2, 3]
  with self.test_session():
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, _,
       _) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=sampled,
           subtract_log_q=False)
      logits_tensor, _ = _compute_sampled_logits(
          weights=constant_op.constant(weights),
          biases=constant_op.constant(biases),
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=len(sampled),
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=True,
          partition_strategy="div",
          colocate_logits=False,
          name="sampled_logits_accidental_hit_removal_num_true_%d" % num_true)
      # Test that the exponentiated logits of accidental hits are near 0.
      # First we need to find the hits in this random test run:
      labels_reshape = labels.reshape((batch_size, num_true))
      got_logits = logits_tensor.eval()
      for row in xrange(batch_size):
        row_labels = labels_reshape[row, :]
        for col in xrange(len(sampled)):
          if sampled[col] in row_labels:
            # We need to add the num_true_test offset into logits_*
            self.assertNear(
                np.exp(got_logits[row, col + num_true]), 0., self._eps)
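What the assertions above check, informally: with remove_accidental_hits=True, any sampled class that also appears among a row's true classes gets its sampled logit pushed to a very large negative value, so its exponentiated logit is effectively 0 and it no longer acts as a spurious negative. A hypothetical NumPy sketch of that masking (not the actual TensorFlow implementation, which uses compute_accidental_hits internally):

import numpy as np

true_classes = np.array([[1], [4]])        # one true class id per row
sampled = np.array([1, 0, 2, 3])           # sampled class ids, shared per batch
sampled_logits = np.random.randn(2, len(sampled))

for row, row_true in enumerate(true_classes):
  hits = np.isin(sampled, row_true)        # sampled classes that are true hits
  sampled_logits[row, hits] = -1e37        # stand-in for -FLOAT_MAX
# Row 0's sampled class 1 is an accidental hit, so exp(sampled_logits[0, 0]) ~ 0.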
Example 5: testShardedColocatedLogits
def testShardedColocatedLogits(self):
  """Sharded weights and biases and with colocated logit computation."""
  np.random.seed(0)
  num_classes = 5
  batch_size = 3
  with self.test_session() as sess:
    for num_true in range(1, 5):
      labels = np.random.randint(
          low=0, high=num_classes, size=batch_size * num_true)
      (weights, biases, hidden_acts, sampled_vals, exp_logits,
       exp_labels) = self._GenerateTestData(
           num_classes=num_classes,
           dim=10,
           batch_size=batch_size,
           num_true=num_true,
           labels=labels,
           sampled=[1, 0, 2, 3],
           subtract_log_q=False)
      weight_shards, bias_shards = self._ShardTestEmbeddings(
          weights, biases, num_shards=3)
      logits_tensor, labels_tensor = _compute_sampled_logits(
          weights=[constant_op.constant(shard) for shard in weight_shards],
          biases=[constant_op.constant(shard) for shard in bias_shards],
          labels=constant_op.constant(
              labels, dtype=dtypes.int64, shape=(batch_size, num_true)),
          inputs=constant_op.constant(hidden_acts),
          num_sampled=4,
          num_classes=num_classes,
          num_true=num_true,
          sampled_values=sampled_vals,
          subtract_log_q=False,
          remove_accidental_hits=False,
          partition_strategy="div",
          colocate_logits=True,
          name="sampled_logits_sharded_colocated_num_true_%d" % num_true)
      got_logits, got_labels = sess.run([logits_tensor, labels_tensor])
      self.assertAllClose(exp_logits, got_logits, self._eps)
      self.assertAllClose(exp_labels, got_labels, self._eps)
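Example 5 also depends on the test helper self._ShardTestEmbeddings, whose body is not shown on this page. As a purely hypothetical stand-in for what such a helper could do, a simple contiguous row split is compatible with partition_strategy="div", which assigns class ids to shards in contiguous blocks:

import numpy as np

def shard_embeddings(weights, biases, num_shards):
  # Hypothetical replacement for _ShardTestEmbeddings: split the class
  # embedding matrix and bias vector into num_shards contiguous row blocks.
  # np.array_split gives the first (len % num_shards) shards one extra row,
  # matching how the "div" partition strategy distributes ids.
  weight_shards = np.array_split(weights, num_shards, axis=0)
  bias_shards = np.array_split(biases, num_shards, axis=0)
  return weight_shards, bias_shards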
Example 6: sampled_sparse_softmax_loss
# Imports used by this function (TensorFlow internal modules):
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops


def sampled_sparse_softmax_loss(weights,
                                biases,
                                labels,
                                inputs,
                                num_sampled,
                                num_classes,
                                sampled_values=None,
                                remove_accidental_hits=True,
                                partition_strategy="mod",
                                name="sampled_sparse_softmax_loss"):
  """Computes and returns the sampled sparse softmax training loss.

  This is a faster way to train a softmax classifier over a huge number of
  classes.

  This operation is for training only. It is generally an underestimate of
  the full softmax loss.

  A common use case is to use this method for training, and calculate the full
  softmax loss for evaluation or inference. In this case, you must set
  `partition_strategy="div"` for the two losses to be consistent, as in the
  following example:

  ```python
  if mode == "train":
    loss = tf.nn.sampled_sparse_softmax_loss(
        weights=weights,
        biases=biases,
        labels=labels,
        inputs=inputs,
        ...,
        partition_strategy="div")
  elif mode == "eval":
    logits = tf.matmul(inputs, tf.transpose(weights))
    logits = tf.nn.bias_add(logits, biases)
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.squeeze(labels),
        logits=logits)
  ```

  See our [Candidate Sampling Algorithms Reference]
  (https://www.tensorflow.org/extras/candidate_sampling.pdf)

  Also see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)
  ([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.

  Args:
    weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
      objects whose concatenation along dimension 0 has shape
      `[num_classes, dim]`. The (possibly-sharded) class embeddings.
    biases: A `Tensor` of shape `[num_classes]`. The class biases.
    labels: A `Tensor` of type `int64` and shape `[batch_size, 1]`.
      The index of the single target class for each row of logits. Note that
      this format differs from the `labels` argument of
      `nn.sparse_softmax_cross_entropy_with_logits`.
    inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
      activations of the input network.
    num_sampled: An `int`. The number of classes to randomly sample per batch.
    num_classes: An `int`. The number of possible classes.
    sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,
      `sampled_expected_count`) returned by a `*_candidate_sampler` function.
      (if None, we default to `log_uniform_candidate_sampler`)
    remove_accidental_hits: A `bool`. Whether to remove "accidental hits"
      where a sampled class equals one of the target classes. Default is
      True.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
      Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: A name for the operation (optional).

  Returns:
    A `batch_size` 1-D tensor of per-example sampled softmax losses.
  """
  logits, _ = nn_impl._compute_sampled_logits(
      weights=weights,
      biases=biases,
      labels=labels,
      inputs=inputs,
      num_sampled=num_sampled,
      num_classes=num_classes,
      num_true=1,
      sampled_values=sampled_values,
      subtract_log_q=True,
      remove_accidental_hits=remove_accidental_hits,
      partition_strategy=partition_strategy,
      name=name)

  # There is only one true label. _compute_sampled_logits puts the true logit
  # at index 0.
  labels = array_ops.zeros([array_ops.shape(logits)[0], 1], dtype=dtypes.int64)

  sampled_losses = nn_ops.sparse_softmax_cross_entropy_with_logits(
      labels=array_ops.squeeze(labels), logits=logits)

  # sampled_losses is a [batch_size] tensor.
  return sampled_losses
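Finally, a quick hypothetical sketch of how the sampled_sparse_softmax_loss defined above might be wired into a TF 1.x training graph. Everything here (variable names, shapes, the optimizer, and the learning rate) is illustrative rather than taken from TensorFlow's documentation:

import tensorflow as tf

batch_size, dim, num_classes, num_sampled = 8, 16, 1000, 32
weights = tf.get_variable("softmax_w", [num_classes, dim])
biases = tf.get_variable("softmax_b", [num_classes])
inputs = tf.random_normal([batch_size, dim])      # e.g. the last hidden layer
labels = tf.random_uniform([batch_size, 1], maxval=num_classes,
                           dtype=tf.int64)        # one target class id per row

per_example_loss = sampled_sparse_softmax_loss(
    weights=weights,
    biases=biases,
    labels=labels,
    inputs=inputs,
    num_sampled=num_sampled,
    num_classes=num_classes,
    partition_strategy="div")
loss = tf.reduce_mean(per_example_loss)           # scalar training loss
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)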