This article collects typical usage examples of the Python method tensorflow.examples.tutorials.mnist.mnist.loss. If you have been wondering how exactly mnist.loss works, how to call it, or what real usage of it looks like, the hand-picked code samples below may help. You can also explore further usage examples from the module the method lives in, tensorflow.examples.tutorials.mnist.mnist.
Below are 2 code examples of the mnist.loss method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
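Before the examples, here is a minimal sketch of the typical call pattern around mnist.loss: mnist.inference builds the two-hidden-layer classifier, mnist.loss reduces the logits and integer labels to a scalar cross-entropy, and mnist.training wraps that loss in a gradient-descent step. The placeholder shapes, layer sizes, and learning rate below are illustrative assumptions, not taken from the examples that follow.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import mnist

# Placeholders for a batch of flattened 28x28 images and their integer class labels.
images = tf.placeholder(tf.float32, [None, mnist.IMAGE_PIXELS])
labels = tf.placeholder(tf.int32, [None])

logits = mnist.inference(images, 128, 32)   # two hidden layers (sizes illustrative)
loss = mnist.loss(logits, labels)           # scalar sparse softmax cross-entropy
train_op = mnist.training(loss, 0.01)       # SGD step on the loss (rate illustrative)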
Example 1: main
# Required import: from tensorflow.examples.tutorials.mnist import mnist [as alias]
# Or: from tensorflow.examples.tutorials.mnist.mnist import loss [as alias]
def main(unused_argv):
  if FLAGS.log_dir is None or FLAGS.log_dir == "":
    raise ValueError("Must specify an explicit `log_dir`")
  if FLAGS.data_dir is None or FLAGS.data_dir == "":
    raise ValueError("Must specify an explicit `data_dir`")

  device, target = device_and_target()
  with tf.device(device):
    images = tf.placeholder(tf.float32, [None, 784], name='image_input')
    labels = tf.placeholder(tf.float32, [None], name='label_input')
    data = read_data_sets(FLAGS.data_dir,
                          one_hot=False,
                          fake_data=False)
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels)
    loss = tf.Print(loss, [loss], message="Loss = ")
    train_op = mnist.training(loss, FLAGS.learning_rate)

  with tf.train.MonitoredTrainingSession(
      master=target,
      is_chief=(FLAGS.task_index == 0),
      checkpoint_dir=FLAGS.log_dir) as sess:
    while not sess.should_stop():
      xs, ys = data.train.next_batch(FLAGS.batch_size, fake_data=False)
      sess.run(train_op, feed_dict={images: xs, labels: ys})
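Example 1 assumes that FLAGS and a device_and_target() helper are defined elsewhere in the same script (it comes from a distributed-training driver). A hypothetical set of flag definitions that would satisfy the FLAGS attributes used above might look like the following; the names mirror the attributes referenced in the example, but the default values are illustrative only.

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("data_dir", "/tmp/mnist-data", "Directory holding the MNIST data.")
flags.DEFINE_string("log_dir", "/tmp/mnist-logs", "Directory for checkpoints and logs.")
flags.DEFINE_integer("task_index", 0, "Index of this task within its job.")
flags.DEFINE_integer("hidden1", 128, "Number of units in hidden layer 1.")
flags.DEFINE_integer("hidden2", 32, "Number of units in hidden layer 2.")
flags.DEFINE_integer("batch_size", 100, "Training batch size.")
flags.DEFINE_float("learning_rate", 0.01, "Initial learning rate.")
FLAGS = flags.FLAGS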
Example 2: inference
# Required import: from tensorflow.examples.tutorials.mnist import mnist [as alias]
# Or: from tensorflow.examples.tutorials.mnist.mnist import loss [as alias]
def inference(inp, num_clusters, hidden1_units, hidden2_units):
  """Build the MNIST model up to where it may be used for inference.

  Args:
    inp: input data
    num_clusters: number of clusters of input features to train.
    hidden1_units: Size of the first hidden layer.
    hidden2_units: Size of the second hidden layer.

  Returns:
    logits: Output tensor with the computed logits.
    clustering_loss: Clustering loss.
    kmeans_training_op: An op to train the clustering.
  """
  # Clustering
  kmeans = tf.contrib.factorization.KMeans(
      inp,
      num_clusters,
      distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
      # TODO(agarwal): kmeans++ is currently causing crash in dbg mode.
      # Enable this after fixing.
      # initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
      use_mini_batch=True)
  all_scores, _, clustering_scores, kmeans_training_op = kmeans.training_graph()
  # Some heuristics to approximately whiten this output.
  all_scores = (all_scores[0] - 0.5) * 5
  # Here we avoid passing the gradients from the supervised objective back to
  # the clusters by creating a stop_gradient node.
  all_scores = tf.stop_gradient(all_scores)
  clustering_loss = tf.reduce_sum(clustering_scores[0])
  # Hidden 1
  with tf.name_scope('hidden1'):
    weights = tf.Variable(
        tf.truncated_normal([num_clusters, hidden1_units],
                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden1_units]),
                         name='biases')
    hidden1 = tf.nn.relu(tf.matmul(all_scores, weights) + biases)
  # Hidden 2
  with tf.name_scope('hidden2'):
    weights = tf.Variable(
        tf.truncated_normal([hidden1_units, hidden2_units],
                            stddev=1.0 / math.sqrt(float(hidden1_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([hidden2_units]),
                         name='biases')
    hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
  # Linear
  with tf.name_scope('softmax_linear'):
    weights = tf.Variable(
        tf.truncated_normal([hidden2_units, NUM_CLASSES],
                            stddev=1.0 / math.sqrt(float(hidden2_units))),
        name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                         name='biases')
    logits = tf.matmul(hidden2, weights) + biases
  return logits, clustering_loss, kmeans_training_op
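As a follow-up, here is a hedged sketch of how the three values returned by this inference() might be wired into a supervised objective with mnist.loss. The cluster count, layer sizes, learning rate, and the choice to group the classifier step with the k-means step are illustrative assumptions, not part of the original example.

logits, clustering_loss, kmeans_training_op = inference(
    images, num_clusters=384, hidden1_units=128, hidden2_units=32)

cross_entropy = mnist.loss(logits, labels)   # supervised loss on the class labels
# One option: advance the classifier and the k-means assignments in a single op.
train_op = tf.group(mnist.training(cross_entropy, 0.01), kmeans_training_op)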