This article collects typical usage examples of the Python function tensorflow.python.ops.losses.losses.softmax_cross_entropy. If you have been wondering exactly what softmax_cross_entropy does, how to call it, or what it looks like in real code, the curated examples below should help.
Nine code examples of softmax_cross_entropy are shown below, sorted by popularity by default.
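Before diving into the examples, here is a minimal stand-alone sketch of the call through the public API (tf.compat.v1.losses.softmax_cross_entropy, the public alias of the internal function covered here). The examples below use TensorFlow's internal module aliases (array_ops, losses, etc.); the sketch sticks to the public names, and the labels and logits are made up for illustration. Note the argument order: onehot_labels first, logits second.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Hypothetical batch of 2 examples and 3 classes.
onehot_labels = tf.constant([[0., 1., 0.],
                             [1., 0., 0.]])
logits = tf.constant([[2.0, 1.0, 0.1],
                      [0.5, 0.2, 0.3]])

# With the default reduction this returns a single scalar loss.
loss = tf.losses.softmax_cross_entropy(onehot_labels, logits)

with tf.Session() as sess:
  print(sess.run(loss))  # mean cross-entropy over the batch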
Example 1: loop_fn
def loop_fn(i):
  image = array_ops.gather(images, i)
  label = array_ops.gather(labels, i)
  logits = array_ops.reshape(model(image, training=training), [-1])
  loss = losses.softmax_cross_entropy(
      logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
  return gradient_ops.gradients(loss, variables.trainable_variables())
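The point of Example 1 is reduction=losses.Reduction.NONE, which keeps one loss value per example instead of collapsing the batch to a scalar, so per-example gradients can be taken. A minimal stand-alone sketch of the same idea through the public API (values invented for illustration):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

onehot_labels = tf.one_hot([1, 0, 2], depth=3)   # 3 examples, 3 classes
logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.3, 1.5, 0.2],
                      [0.1, 0.2, 3.0]])
# Shape [3]: one cross-entropy value per example, nothing is averaged.
per_example_loss = tf.losses.softmax_cross_entropy(
    onehot_labels, logits, reduction=tf.losses.Reduction.NONE)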
Example 2: testRNNWithKerasGRUCell
def testRNNWithKerasGRUCell(self):
  with self.cached_session() as sess:
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 100
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train)
    cell = keras.layers.GRUCell(output_shape)
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape))
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape))
    outputs, state = rnn.dynamic_rnn(
        cell, inputs, dtype=dtypes.float32)
    self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
    self.assertEqual(state.shape.as_list(), [None, output_shape])
    loss = losses.softmax_cross_entropy(predict, state)
    train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
    sess.run([variables_lib.global_variables_initializer()])
    _, outputs, state = sess.run(
        [train_op, outputs, state], {inputs: x_train, predict: y_train})
    self.assertEqual(len(outputs), batch)
    self.assertEqual(len(state), batch)
Example 3: softmax_classifier
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [feature_size, n_classes], linear transformation matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.multiply(logits, class_weight)
    return nn.softmax(logits), losses.softmax_cross_entropy(labels, logits)
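A hedged usage sketch for the helper above, assuming softmax_classifier and its TensorFlow dependencies are in scope; the sizes are made up, and per the docstring, tf.argmax on the returned probabilities gives the predicted classes:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

feature_size, n_classes = 4, 3                         # hypothetical sizes
x = tf.placeholder(tf.float32, [None, feature_size])   # features
y = tf.placeholder(tf.float32, [None, n_classes])      # one-hot labels
w = tf.Variable(tf.zeros([feature_size, n_classes]))
b = tf.Variable(tf.zeros([n_classes]))

probabilities, loss = softmax_classifier(x, y, w, b)
predictions = tf.argmax(probabilities, axis=1)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)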
Example 4: test_unifiedRNN_with_cond
def test_unifiedRNN_with_cond(self):
  # This test is to demonstrate the graph rewrite of grappler plugin under
  # the condition that the function returns different number of internal
  # states.
  input_shape = 10
  rnn_state_size = 8
  output_shape = 8
  timestep = 4
  batch = 100
  epoch = 1
  with self.cached_session(config=self.config, use_gpu=True) as sess:
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)
    layer = UnifiedLSTM(rnn_state_size)
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape), name='predict')
    zeros = array_ops.zeros([batch, output_shape])
    dummy_runtime = constant_op.constant(
        'unknown', dtype=dtypes.string, name='runtime')
    a = constant_op.constant(0)
    b = constant_op.constant(1)
    # Will always run the lstm layer.
    outputs, runtime = control_flow_ops.cond(
        gen_math_ops.less(a, b),
        lambda: layer(inputs),
        lambda: (zeros, dummy_runtime))
    loss = losses.softmax_cross_entropy(predict, outputs)
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    train_op = optimizer.minimize(loss)
    sess.run([variables.global_variables_initializer()])
    existing_loss = 0
    for _ in range(epoch):
      loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
          inputs: x_train,
          predict: y_train
      })
      if test.is_gpu_available():
        self.assertEquals(runtime_value, b'cudnn')
      else:
        self.assertEquals(runtime_value, b'cpu')
      # Make sure the loss is updated for every epoch
      # (layer weights properly updated).
      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value
Example 5: acgan_generator_loss
def acgan_generator_loss(discriminator_gen_classification_logits,
                         one_hot_labels,
                         weights=1.0,
                         scope=None,
                         loss_collection=ops.GraphKeys.LOSSES,
                         reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
                         add_summaries=False):
  """ACGAN loss for the generator.

  The ACGAN loss adds a classification loss to the conditional discriminator.
  Therefore, the discriminator must output a tuple consisting of
    (1) the real/fake prediction and
    (2) the logits for the classification (usually the last conv layer,
        flattened).

  For more details:
    ACGAN: https://arxiv.org/abs/1610.09585

  Args:
    discriminator_gen_classification_logits: Classification logits for
      generated data.
    one_hot_labels: A Tensor holding one-hot labels for the batch.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_classification_logits`, and must be broadcastable to
      `discriminator_gen_classification_logits` (i.e., all dimensions must be
      either `1`, or the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. Shape depends on `reduction`.

  Raises:
    ValueError: if arg module not either `generator` or `discriminator`
    TypeError: if the discriminator does not output a tuple.
  """
  with ops.name_scope(
      scope, 'acgan_generator_loss',
      (discriminator_gen_classification_logits, one_hot_labels)) as scope:
    loss = losses.softmax_cross_entropy(
        one_hot_labels,
        discriminator_gen_classification_logits,
        weights=weights,
        scope=scope,
        loss_collection=loss_collection,
        reduction=reduction)
    if add_summaries:
      summary.scalar('generator_ac_loss', loss)
  return loss
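A hedged sketch of how the generator-side loss above might be wired up, assuming acgan_generator_loss and its dependencies are in scope; all names and shapes here are invented placeholders standing in for the discriminator's classification logits on generated data:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

batch, n_classes = 16, 10                                           # hypothetical sizes
one_hot_labels = tf.placeholder(tf.float32, [batch, n_classes])
gen_class_logits = tf.placeholder(tf.float32, [batch, n_classes])   # from the discriminator

gen_ac_loss = acgan_generator_loss(gen_class_logits, one_hot_labels,
                                   add_summaries=True)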
Example 6: test_unifiedRNN
def test_unifiedRNN(self):
  rewrites = rewriter_config_pb2.RewriterConfig()
  rewrites.function_optimization = rewriter_config_pb2.RewriterConfig.OFF
  customer_optimizer = rewrites.custom_optimizers.add()
  customer_optimizer.name = 'ExperimentalImplementationSelector'
  rewrites.min_graph_nodes = -1
  graph_options = config_pb2.GraphOptions(rewrite_options=rewrites)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  input_shape = 10
  rnn_state_size = 8
  output_shape = 8
  timestep = 4
  batch = 100
  epoch = 1
  with ops.Graph().as_default(), session.Session(config=config) as sess:
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train)
    layer = UnifiedLSTM(rnn_state_size)
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape), name='predict')
    outputs, runtime = layer(inputs)
    loss = losses.softmax_cross_entropy(predict, outputs)
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    train_op = optimizer.minimize(loss)
    sess.run([variables.global_variables_initializer()])
    existing_loss = 0
    for _ in range(epoch):
      loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
          inputs: x_train,
          predict: y_train
      })
      if test.is_gpu_available():
        self.assertEquals(runtime_value, b'cudnn')
      else:
        self.assertEquals(runtime_value, b'cpu')
      # Make sure the loss is updated for every epoch
      # (layer weights properly updated).
      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value
Example 7: test_unifiedRNN
def test_unifiedRNN(self):
  input_shape = 10
  rnn_state_size = 8
  output_shape = 8
  timestep = 4
  batch = 100
  epoch = 1
  with self.cached_session(config=self.config, use_gpu=True) as sess:
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)
    layer = UnifiedLSTM(rnn_state_size)
    inputs = array_ops.placeholder(
        dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape), name='predict')
    outputs, runtime = layer(inputs)
    loss = losses.softmax_cross_entropy(predict, outputs)
    optimizer = gradient_descent.GradientDescentOptimizer(0.001)
    train_op = optimizer.minimize(loss)
    sess.run([variables.global_variables_initializer()])
    existing_loss = 0
    for _ in range(epoch):
      loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
          inputs: x_train,
          predict: y_train
      })
      if test.is_gpu_available():
        self.assertEquals(runtime_value, b'cudnn')
      else:
        self.assertEquals(runtime_value, b'cpu')
      # Make sure the loss is updated for every epoch
      # (layer weights properly updated).
      self.assertNotEqual(existing_loss, loss_value)
      existing_loss = loss_value
Example 8: testStaticRNNWithKerasSimpleRNNCell
def testStaticRNNWithKerasSimpleRNNCell(self):
  with self.cached_session() as sess:
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 100
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    x_train = np.transpose(x_train, (1, 0, 2))
    y_train = keras.utils.to_categorical(y_train)
    cell = keras.layers.SimpleRNNCell(output_shape)
    inputs = [array_ops.placeholder(
        dtypes.float32, shape=(None, input_shape))] * timestep
    predict = array_ops.placeholder(
        dtypes.float32, shape=(None, output_shape))
    outputs, state = rnn.static_rnn(
        cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), timestep)
    self.assertEqual(outputs[0].shape.as_list(), [None, output_shape])
    self.assertEqual(state.shape.as_list(), [None, output_shape])
    loss = losses.softmax_cross_entropy(predict, state)
    train_op = training.GradientDescentOptimizer(0.001).minimize(loss)
    sess.run([variables_lib.global_variables_initializer()])
    feed_dict = {i: d for i, d in zip(inputs, x_train)}
    feed_dict[predict] = y_train
    _, outputs, state = sess.run(
        [train_op, outputs, state], feed_dict)
    self.assertEqual(len(outputs), timestep)
    self.assertEqual(len(outputs[0]), batch)
    self.assertEqual(len(state), batch)
Example 9: acgan_discriminator_loss
def acgan_discriminator_loss(
    discriminator_real_classification_logits,
    discriminator_gen_classification_logits,
    one_hot_labels,
    label_smoothing=0.0,
    real_weights=1.0,
    generated_weights=1.0,
    scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    add_summaries=False):
  """ACGAN loss for the discriminator.

  The ACGAN loss adds a classification loss to the conditional discriminator.
  Therefore, the discriminator must output a tuple consisting of
    (1) the real/fake prediction and
    (2) the logits for the classification (usually the last conv layer,
        flattened).

  For more details:
    ACGAN: https://arxiv.org/abs/1610.09585

  Args:
    discriminator_real_classification_logits: Classification logits for real
      data.
    discriminator_gen_classification_logits: Classification logits for
      generated data.
    one_hot_labels: A Tensor holding one-hot labels for the batch.
    label_smoothing: A float in [0, 1]. If greater than 0, smooth the labels
      for "discriminator on real data" as suggested in
      https://arxiv.org/pdf/1701.00160
    real_weights: Optional `Tensor` whose rank is either 0, or the same rank
      as `discriminator_real_outputs`, and must be broadcastable to
      `discriminator_real_outputs` (i.e., all dimensions must be either `1`,
      or the same as the corresponding dimension).
    generated_weights: Same as `real_weights`, but for
      `discriminator_gen_classification_logits`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. Shape depends on `reduction`.

  Raises:
    TypeError: If the discriminator does not output a tuple.
  """
  with ops.name_scope(
      scope, 'acgan_discriminator_loss',
      (discriminator_real_classification_logits,
       discriminator_gen_classification_logits, one_hot_labels)) as scope:
    loss_on_generated = losses.softmax_cross_entropy(
        one_hot_labels, discriminator_gen_classification_logits,
        weights=generated_weights, scope=scope, loss_collection=None,
        reduction=reduction)
    loss_on_real = losses.softmax_cross_entropy(
        one_hot_labels, discriminator_real_classification_logits,
        weights=real_weights, label_smoothing=label_smoothing, scope=scope,
        loss_collection=None, reduction=reduction)
    loss = loss_on_generated + loss_on_real
    util.add_loss(loss, loss_collection)
    if add_summaries:
      summary.scalar('discriminator_gen_ac_loss', loss_on_generated)
      summary.scalar('discriminator_real_ac_loss', loss_on_real)
      summary.scalar('discriminator_ac_loss', loss)
  return loss
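A hedged sketch of the discriminator-side counterpart, again assuming acgan_discriminator_loss and its dependencies are in scope and using invented placeholder shapes; the same one-hot labels are applied to both the real and the generated classification logits:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

batch, n_classes = 16, 10                                            # hypothetical sizes
one_hot_labels = tf.placeholder(tf.float32, [batch, n_classes])
real_class_logits = tf.placeholder(tf.float32, [batch, n_classes])
gen_class_logits = tf.placeholder(tf.float32, [batch, n_classes])

dis_ac_loss = acgan_discriminator_loss(
    real_class_logits, gen_class_logits, one_hot_labels,
    label_smoothing=0.1)  # smooth only the labels on real data, per the docstring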