

Python mnist.read_data_sets Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets. If you have been wondering what exactly read_data_sets does, how to call it, or how it is used in practice, the curated code examples below should help.


Fifteen code examples of read_data_sets are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
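Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what read_data_sets returns under its default settings: a Datasets tuple with train/validation/test splits, each exposing images, labels and next_batch. The path is arbitrary, and the shapes in the comments assume the default validation_size of 5000 and reshape=True.

# Minimal usage sketch (assumes TensorFlow 1.x with tf.contrib available).
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets

# Downloads MNIST on first use; one_hot controls the label format.
mnist = read_data_sets('/tmp/mnist_data', one_hot=True)

print(mnist.train.images.shape)       # (55000, 784), float32 scaled to [0, 1]
print(mnist.validation.images.shape)  # (5000, 784)
print(mnist.test.images.shape)        # (10000, 784)

# Mini-batches for a training loop.
batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape, batch_ys.shape)  # (100, 784) (100, 10)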

Example 1: main

def main(_):
  mnist = read_data_sets(FLAGS.data_dir, one_hot=True)
  x = tf.placeholder(tf.float32, [None, 784])
  W1 = tf.Variable(tf.random_normal([784, 256]))
  b1 = tf.Variable(tf.random_normal([256]))
  W2 = tf.Variable(tf.random_normal([256, 10]))
  b2 = tf.Variable(tf.random_normal([10]))
  lay1 = tf.nn.relu(tf.matmul(x, W1) + b1)
  y = tf.add(tf.matmul(lay1, W2), b2)

  y_ = tf.placeholder(tf.float32, [None, 10])
  cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  tf.global_variables_initializer().run()
  for index in range(10000): 
    # print('process the {}th batch'.format(index))
    start_train = time.time()
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    # print('the {0} batch takes time: {1}'.format(index, time.time()-start_train))

  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))
Author: SiyuanWei | Project: tensorflow-101 | Lines: 27 | Source: train_mnist_single_perceptron.py

Example 2: main

def main(args):
    if args and args[0] == 'cnn':
        network_type = 'convolutional'
        reshape_data = False
        neural_network = network.ConvNet(max_epochs=1000)
        all_configurations = list(itertools.product(*configurations_cnn))  # create all possible combinations
        headers = ['1 layer', '2 layer', 'Epochs', 'Accuracy', 'Training time']
    else:
        network_type = 'simple'
        reshape_data = True
        neural_network = network.SimpleNet(max_epochs=1000)
        all_configurations = list(itertools.product(*configurations_simple_nn))  # create all possible combinations
        headers = ['Activation', 'Hidden neurons', 'Epochs', 'Accuracy', 'Training time']

    # --- setup logging to file and stdout
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    log_formatter = logging.Formatter("%(message)s")
    file_name = "mnist_" + network_type + "_log_{}".format(datetime.now().strftime("%Y%m%d-%H%M%S"))
    file_handler = logging.FileHandler("logs/{0}.log".format(file_name))
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(log_formatter)
    root_logger.addHandler(stream_handler)

    # --- load MNIST data
    mnist = read_data_sets('.\\data', one_hot=True, reshape=reshape_data)
    root_logger.info('Loaded MNIST data!')
    root_logger.info(network_type.title() + ' neural network training starts:\n')

    # --- start training
    total_elapsed_time = 0.0
    best_accuracy = 0.0
    best_conf = None
    all_results = []
    for conf in all_configurations:
        neural_network.build(configuration=conf)
        stats = None
        while stats is None:
            stats = neural_network.train(data=mnist, checkpoints=checkpoints)
        elapsed_time = stats[len(stats) - 1][2]
        total_elapsed_time += elapsed_time

        for item in stats:
            all_results.append([conf[0], conf[1], item[0], item[1], item[2]])
        all_results.append([])
        accuracy = max([row[1] for row in stats])  # take max value from all checkpoints
        print('Training finished. Configuration: {}. Accuracy: {:.4f}, Time: {:.1f} sec'
              .format(conf, accuracy, elapsed_time))
        if best_accuracy < accuracy:
            best_accuracy = accuracy
            best_conf = conf

    # --- log results to file
    root_logger.info(tabulate(all_results, headers=headers, floatfmt=".4f", numalign='center', stralign='center'))
    root_logger.info('----------------------------------------------------------------------')
    root_logger.info('Best accuracy: {:.4f}, configuration: {}'.format(best_accuracy, best_conf))
    root_logger.info('Total elapsed time: {:.0f} minutes, {:.1f} seconds'
                     .format(total_elapsed_time // 60, total_elapsed_time % 60))
    root_logger.info('----------------------------------------------------------------------')

    logging.shutdown()
Author: ZdyrkoVlad | Project: mnist-cnn | Lines: 60 | Source: test_network.py
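The configuration lists that feed itertools.product are defined elsewhere in the project and are not shown above. Purely as an illustration of the grid-search setup, here is a hypothetical sketch of what they might contain; the names match the snippet, but every value is an assumption:

# Hypothetical hyperparameter grids (values are assumptions, not the author's).
# Each inner list is one axis; itertools.product enumerates every combination,
# matching the table headers used in the snippet above.
configurations_simple_nn = [
    ['relu', 'sigmoid'],   # activation
    [128, 256, 512],       # hidden neurons
]
configurations_cnn = [
    [16, 32],              # filters in the 1st conv layer
    [32, 64],              # filters in the 2nd conv layer
]
checkpoints = [100, 500, 1000]  # epochs at which accuracy is reported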

Example 3: get_data

def get_data():
    mnist = read_data_sets("../data/", one_hot=True, reshape=True, validation_size=2000)
    X_train = mnist.train.images
    X_val = mnist.validation.images
    Y_train = mnist.train.labels
    Y_val = mnist.validation.labels
    print(X_train.shape, X_val.shape, Y_val.shape, Y_train.shape)
    return X_train, X_val, Y_val, Y_train
Author: SusanJJN | Project: dl_tutorial | Lines: 8 | Source: fc_MNIST_keras_large.py

Example 4: main

def main(argv):
    # Get the data.
    data_sets = mnist.read_data_sets(FLAGS.directory, dtype=tf.uint8, reshape=False)

    # Convert to Examples and write the result to TFRecords.
    convert_to(data_sets.train, "train")
    convert_to(data_sets.validation, "validation")
    convert_to(data_sets.test, "test")
Author: tongwang01 | Project: tensorflow | Lines: 8 | Source: convert_to_records.py
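The convert_to helper is not part of the snippet. The sketch below follows the pattern of TensorFlow's official convert_to_records.py example and is only an approximation of what the function likely does: serialize each uint8 image and its label into a tf.train.Example and write them to a .tfrecords file. The default directory argument is an assumption added so the sketch is self-contained.

import os
import tensorflow as tf

def convert_to(data_set, name, directory='/tmp/data'):
    """Approximate sketch: serialize one DataSet split to <directory>/<name>.tfrecords."""
    images = data_set.images            # uint8, shape (num_examples, 28, 28, 1)
    labels = data_set.labels
    filename = os.path.join(directory, name + '.tfrecords')
    print('Writing', filename)
    with tf.python_io.TFRecordWriter(filename) as writer:
        for index in range(data_set.num_examples):
            image_raw = images[index].tostring()
            example = tf.train.Example(features=tf.train.Features(feature={
                'height': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[images.shape[1]])),
                'width': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[images.shape[2]])),
                'depth': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[images.shape[3]])),
                'label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(labels[index])])),
                'image_raw': tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image_raw])),
            }))
            writer.write(example.SerializeToString())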

Example 5: main

def main(_):        # _ : unused_argv
    # class type: Dataset
    data_sets = mnist.read_data_sets(cfg.FLAGS.directory,
                                     dtype=tf.uint8,
                                     reshape=False,
                                     validation_size=cfg.FLAGS.validation_size)
    convert_to_tfrecords(data_sets.train, 'train')
    convert_to_tfrecords(data_sets.validation, 'valid')
    convert_to_tfrecords(data_sets.test, 'test')
Author: jamescfli | Project: PythonTest | Lines: 9 | Source: write_tfrecords.py

Example 6: maybe_download_and_convert

def maybe_download_and_convert(data_dir):
    # Get the data.
    data_sets = mnist.read_data_sets(data_dir, dtype=tf.uint8, reshape=False)
    # Convert to Examples and write the result to TFRecords.
    filenames = [
        os.path.join(data_dir, name + '.tfrecords')
        for name in ['train', 'validation', 'test']
    ]
    for idx, f in enumerate(filenames):
        if not tf.gfile.Exists(f):
            convert_to_tfr(data_sets[idx], f)
Author: winston-li | Project: tensorflow_playground | Lines: 11 | Source: mnist_input.py

Example 7: main

def main(unused_argv):
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS.validation_size)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test')
Author: 1000sprites | Project: tensorflow | Lines: 11 | Source: convert_to_records.py

Example 8: main

def main(argv):
  import pdb; pdb.set_trace()  # debugger breakpoint left in by the author
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test')
Author: stasonhan | Project: machine-learing | Lines: 11 | Source: tfrecoder.py

Example 9: main

def main():
    mnist = read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)

    size = 28
    n = size * size
    k = 10
    x = tf.placeholder(tf.float32, (None, n))
    y = tf.placeholder(tf.float32, (None, k))
    keep_prob = tf.placeholder(tf.float32)

    optimizer, z = create_cnn(size, n, k, x, y, keep_prob)
    # optimizer, z = create_perceptron(size, n, k, x, y, keep_prob)

    session = tf.Session()
    session.run(tf.global_variables_initializer())

    for i in range(1000):
        print(i)
        x_data, y_data = mnist.train.next_batch(100)
        session.run(optimizer, {x: x_data, y: y_data, keep_prob: 0.5})

    #

    x_data, y_data = mnist.test.images, mnist.test.labels
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(z, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    result = session.run(accuracy, {x: x_data, y: y_data, keep_prob: 1.0})
    print(result)

    #

    def draw(image):
        print('--' * 28)
        for i in range(28):
            offset = i * 28
            lines = image[offset:offset + 28]
            print(''.join(['%2d' % (j * 10) for j in lines]))

    z_data = session.run(tf.argmax(z, 1),
                         {x: x_data, y: y_data, keep_prob: 1.0})
    corrects = session.run(correct, {x: x_data, y: y_data, keep_prob: 1.0})

    count = 3
    for i in range(len(corrects)):
        if corrects[i]:
            continue
        draw(x_data[i])
        print('right: %d / predict: %d' %
              (list(y_data[i]).index(1), z_data[i]))
        count -= 1
        if count == 0:
            break
Author: ahastudio | Project: CodingLife | Lines: 52 | Source: test.py

Example 10: build_input_pipeline

def build_input_pipeline(data_dir, batch_size, heldout_size, mnist_type):
  """Builds an Iterator switching between train and heldout data."""
  # Build an iterator over training batches.
  if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
    if mnist_type == MnistType.FAKE_DATA:
      mnist_data = build_fake_data()
    else:
      mnist_data = mnist.read_data_sets(data_dir)
    training_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.train.images, np.int32(mnist_data.train.labels)))
    heldout_dataset = tf.data.Dataset.from_tensor_slices(
        (mnist_data.validation.images,
         np.int32(mnist_data.validation.labels)))
  elif mnist_type == MnistType.BERNOULLI:
    training_dataset = load_bernoulli_mnist_dataset(data_dir, "train")
    heldout_dataset = load_bernoulli_mnist_dataset(data_dir, "valid")
  else:
    raise ValueError("Unknown MNIST type.")

  training_batches = training_dataset.repeat().batch(batch_size)
  training_iterator = training_batches.make_one_shot_iterator()

  # Build an iterator over the heldout set with batch_size=heldout_size,
  # i.e., return the entire heldout set as a constant.
  heldout_frozen = (heldout_dataset.take(heldout_size).
                    repeat().batch(heldout_size))
  heldout_iterator = heldout_frozen.make_one_shot_iterator()

  # Combine these into a feedable iterator that can switch between training
  # and validation inputs.
  handle = tf.placeholder(tf.string, shape=[])
  feedable_iterator = tf.data.Iterator.from_string_handle(
      handle, training_batches.output_types, training_batches.output_shapes)
  images, labels = feedable_iterator.get_next()
  # Reshape as a pixel image and binarize pixels.
  images = tf.reshape(images, shape=[-1] + IMAGE_SHAPE)
  if mnist_type in [MnistType.FAKE_DATA, MnistType.THRESHOLD]:
    images = tf.cast(images > 0.5, dtype=tf.int32)

  return images, labels, handle, training_iterator, heldout_iterator
Author: asudomoeva | Project: probability | Lines: 40 | Source: vq_vae.py
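A hedged sketch of how the values returned by build_input_pipeline are typically consumed in a session; the argument values and variable names are assumptions, but Example 15 below uses the same handle-feeding pattern:

# Sketch of consuming the feedable iterator (argument values are illustrative).
images, labels, handle, training_iterator, heldout_iterator = build_input_pipeline(
    data_dir='/tmp/mnist_data', batch_size=128, heldout_size=5000,
    mnist_type=MnistType.THRESHOLD)

with tf.Session() as sess:
    train_handle = sess.run(training_iterator.string_handle())
    heldout_handle = sess.run(heldout_iterator.string_handle())
    # Feeding a different handle switches the same images/labels tensors
    # between the training stream and the frozen heldout batch.
    train_imgs, train_lbls = sess.run([images, labels],
                                      feed_dict={handle: train_handle})
    val_imgs, val_lbls = sess.run([images, labels],
                                  feed_dict={handle: heldout_handle})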

Example 11: main

def main(_):

    print("Downloading and reading data sets...")
    mnist = read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b

    y_ = tf.placeholder(tf.float32, [None, 10])

    # Define loss and optimizer
    # The raw formulation of cross-entropy,
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
    #                                 reduction_indices=[1]))
    # can be numerically unstable.
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.initialize_all_variables().run()
    # Train
    print("training for %s steps" % FLAGS.num_steps)
    for _ in xrange(FLAGS.num_steps):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("accuracy: %s " % sess.run(accuracy,
                                     feed_dict={x: mnist.test.images,
                                                y_: mnist.test.labels}))
Author: ccortezb | Project: pipeline | Lines: 38 | Source: mnist_simple.py

Example 12: main

def main(_):
  # Import data
  mnist = read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)

  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  y_ = tf.placeholder(tf.float32, [None, 10])

  cross_entropy = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
  trainer = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for _ in range(1000):
      batch_xs, batch_ys = mnist.train.next_batch(100)
      sess.run(trainer, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Author: lzqkean | Project: deep_learning | Lines: 24 | Source: 1a_mnist_softmax.py

Example 13: category

# Load the MNIST handwritten digits and create a simple CNN network to predict the
# digit category (0-9)

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Start a graph session
sess = tf.Session()

# Load data
data_dir = 'temp'
mnist = read_data_sets(data_dir)

# Convert images into 28x28 (they are downloaded as 1x784)
train_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.train.images])
test_xdata = np.array([np.reshape(x, (28,28)) for x in mnist.test.images])

# Keep labels as integer class ids (one_hot was not requested here)
train_labels = mnist.train.labels
test_labels = mnist.test.labels

# Set model parameters
batch_size = 100
learning_rate = 0.005
evaluation_size = 500
image_width = train_xdata[0].shape[0]
image_height = train_xdata[0].shape[1]
Author: hzw1199 | Project: TensorFlow-Machine-Learning-Cookbook | Lines: 31 | Source: introductory_cnn.py

Example 14: main

def main(_):
    """Build the full graph for feeding inputs, training, and
    saving checkpoints.  Run the training. Then, load the saved graph and
    run some predictions."""

    # Get input data: get the sets of images and labels for training,
    # validation, and test on MNIST.
    data_sets = read_data_sets(FLAGS.data_dir, False)

    mnist_graph = tf.Graph()
    with mnist_graph.as_default():
        # Generate placeholders for the images and labels.
        images_placeholder = tf.placeholder(tf.float32)
        labels_placeholder = tf.placeholder(tf.int32)
        tf.add_to_collection("images", images_placeholder)  # Remember this Op.
        tf.add_to_collection("labels", labels_placeholder)  # Remember this Op.

        # Build a Graph that computes predictions from the inference model.
        logits = mnist_inference(images_placeholder,
                                 HIDDEN1_UNITS,
                                 HIDDEN2_UNITS)
        tf.add_to_collection("logits", logits)  # Remember this Op.

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op, loss = mnist_training(
            logits, labels_placeholder, 0.01)

        # prediction accuracy
        _, indices_op = tf.nn.top_k(logits)
        flattened = tf.reshape(indices_op, [-1])
        correct_prediction = tf.cast(
            tf.equal(labels_placeholder, flattened), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

        # Define info to be used by the SummaryWriter. This will let
        # TensorBoard plot values during the training process.
        loss_summary = tf.scalar_summary("loss", loss)
        acc_summary = tf.scalar_summary("accuracy", accuracy)
        train_summary_op = tf.merge_summary([loss_summary, acc_summary])

        # Add the variable initializer Op.
        init = tf.initialize_all_variables()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a summary writer.
        print("Writing Summaries to %s" % FLAGS.model_dir)
        train_summary_writer = tf.train.SummaryWriter(FLAGS.model_dir)

        # Uncomment the following line to see what we have constructed.
        # tf.train.write_graph(tf.get_default_graph().as_graph_def(),
        #                      "/tmp", "complete.pbtxt", as_text=True)

    # Run training for MAX_STEPS and save checkpoint at the end.
    with tf.Session(graph=mnist_graph) as sess:
        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.num_steps):
            # Read a batch of images and labels.
            images_feed, labels_feed = data_sets.train.next_batch(BATCH_SIZE)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.  To
            # inspect the values of your Ops or variables, you may include them
            # in the list passed to sess.run() and the value tensors will be
            # returned in the tuple from the call.
            _, loss_value, tsummary, acc = sess.run(
                [train_op, loss, train_summary_op, accuracy],
                feed_dict={images_placeholder: images_feed,
                           labels_placeholder: labels_feed})
            if step % 100 == 0:
                # Write summary info
                train_summary_writer.add_summary(tsummary, step)
            if step % 1000 == 0:
                # Print loss/accuracy info
                print('----Step %d: loss = %.4f' % (step, loss_value))
                print("accuracy: %s" % acc)

        print("\nWriting checkpoint file.")
        checkpoint_file = os.path.join(FLAGS.model_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        _, loss_value = sess.run(
            [train_op, loss],
            feed_dict={images_placeholder: data_sets.test.images,
                       labels_placeholder: data_sets.test.labels})
        print("Test set loss: %s" % loss_value)

    # Run evaluation based on the saved checkpoint.
    with tf.Session(graph=tf.Graph()) as sess:
        checkpoint_file = tf.train.latest_checkpoint(FLAGS.model_dir)
        print("\nRunning evaluation based on saved checkpoint.")
        print("checkpoint file: {}".format(checkpoint_file))
        # Load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Retrieve the Ops we 'remembered'.
#......... part of the code omitted here .........
Author: ccortezb | Project: pipeline | Lines: 101 | Source: mnist_hidden.py
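The omitted tail of this example presumably looks up the Ops registered earlier with tf.add_to_collection and runs a few predictions against the restored graph. A generic sketch of that pattern is shown below; it is not the author's exact code, and it reuses sess and data_sets from the snippet above:

# Generic sketch: retrieve the 'remembered' Ops after restoring the meta graph.
logits = tf.get_collection("logits")[0]
images_placeholder = tf.get_collection("images")[0]
labels_placeholder = tf.get_collection("labels")[0]

# Predict the classes of a few test images with the restored model.
_, predicted = tf.nn.top_k(logits)
sample_images = data_sets.test.images[:5]
print("predictions:", sess.run(predicted,
                               feed_dict={images_placeholder: sample_images}))
print("labels:     ", data_sets.test.labels[:5])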

Example 15: main

def main(argv):
  del argv  # unused
  if tf.gfile.Exists(FLAGS.model_dir):
    tf.logging.warning(
        "Warning: deleting old log directory at {}".format(FLAGS.model_dir))
    tf.gfile.DeleteRecursively(FLAGS.model_dir)
  tf.gfile.MakeDirs(FLAGS.model_dir)

  if FLAGS.fake_data:
    mnist_data = build_fake_data()
  else:
    mnist_data = mnist.read_data_sets(FLAGS.data_dir, reshape=False)

  (images, labels, handle,
   training_iterator, heldout_iterator) = build_input_pipeline(
       mnist_data, FLAGS.batch_size, mnist_data.validation.num_examples)

  # Build a Bayesian LeNet5 network. We use the Flipout Monte Carlo estimator
  # for the convolution and fully-connected layers: this enables lower
  # variance stochastic gradients than naive reparameterization.
  with tf.name_scope("bayesian_neural_net", values=[images]):
    neural_net = tf.keras.Sequential([
        tfp.layers.Convolution2DFlipout(6,
                                        kernel_size=5,
                                        padding="SAME",
                                        activation=tf.nn.relu),
        tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
                                     strides=[2, 2],
                                     padding="SAME"),
        tfp.layers.Convolution2DFlipout(16,
                                        kernel_size=5,
                                        padding="SAME",
                                        activation=tf.nn.relu),
        tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
                                     strides=[2, 2],
                                     padding="SAME"),
        tfp.layers.Convolution2DFlipout(120,
                                        kernel_size=5,
                                        padding="SAME",
                                        activation=tf.nn.relu),
        tf.keras.layers.Flatten(),
        tfp.layers.DenseFlipout(84, activation=tf.nn.relu),
        tfp.layers.DenseFlipout(10)
        ])

    logits = neural_net(images)
    labels_distribution = tfd.Categorical(logits=logits)

  # Compute the -ELBO as the loss, averaged over the batch size.
  neg_log_likelihood = -tf.reduce_mean(labels_distribution.log_prob(labels))
  kl = sum(neural_net.losses) / mnist_data.train.num_examples
  elbo_loss = neg_log_likelihood + kl

  # Build metrics for evaluation. Predictions are formed from a single forward
  # pass of the probabilistic layers. They are cheap but noisy predictions.
  predictions = tf.argmax(logits, axis=1)
  accuracy, accuracy_update_op = tf.metrics.accuracy(
      labels=labels, predictions=predictions)

  # Extract weight posterior statistics for layers with weight distributions
  # for later visualization.
  names = []
  qmeans = []
  qstds = []
  for i, layer in enumerate(neural_net.layers):
    try:
      q = layer.kernel_posterior
    except AttributeError:
      continue
    names.append("Layer {}".format(i))
    qmeans.append(q.mean())
    qstds.append(q.stddev())

  with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    train_op = optimizer.minimize(elbo_loss)

  init_op = tf.group(tf.global_variables_initializer(),
                     tf.local_variables_initializer())

  with tf.Session() as sess:
    sess.run(init_op)

    # Run the training loop.
    train_handle = sess.run(training_iterator.string_handle())
    heldout_handle = sess.run(heldout_iterator.string_handle())
    for step in range(FLAGS.max_steps):
      _ = sess.run([train_op, accuracy_update_op],
                   feed_dict={handle: train_handle})

      if step % 100 == 0:
        loss_value, accuracy_value = sess.run(
            [elbo_loss, accuracy], feed_dict={handle: train_handle})
        print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format(
            step, loss_value, accuracy_value))

      if (step+1) % FLAGS.viz_steps == 0:
        # Compute log prob of heldout set by averaging draws from the model:
        # p(heldout | train) = int_model p(heldout|model) p(model|train)
        #                   ~= 1/n * sum_{i=1}^n p(heldout | model_i)
#......... part of the code omitted here .........
Author: asudomoeva | Project: probability | Lines: 101 | Source: bayesian_neural_network.py
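The omitted evaluation block averages predictive probabilities over several stochastic forward passes, following the Monte Carlo estimate written out in the comment above. A hedged sketch of that idea follows; the draw count and variable names are assumptions, and the block belongs inside the viz_steps branch of the training loop:

# Hedged sketch of the held-out evaluation (runs inside the viz_steps branch).
# Each forward pass through the Flipout layers samples fresh weights, so
# averaging softmax outputs approximates p(heldout | train).
probs = tf.nn.softmax(logits)
num_monte_carlo = 50  # assumed number of draws
prob_samples = np.asarray([
    sess.run(probs, feed_dict={handle: heldout_handle})
    for _ in range(num_monte_carlo)])
mean_probs = prob_samples.mean(axis=0)
# The heldout iterator repeats the same full batch, so labels stay aligned.
label_vals = sess.run(labels, feed_dict={handle: heldout_handle})
heldout_log_prob = np.mean(
    np.log(mean_probs[np.arange(len(label_vals)), label_vals]))
print("Held-out log-likelihood: {:.3f}".format(heldout_log_prob))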


Note: the tensorflow.contrib.learn.python.learn.datasets.mnist.read_data_sets examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open source code and documentation platforms. The code snippets were selected from open source projects contributed by many developers, and copyright of the source code remains with the original authors. When distributing or using the code, please follow the corresponding project's license; do not reproduce without permission.