

Python input_data.read_data_sets Method Code Examples

This article compiles typical usage examples of the Python input_data.read_data_sets method. If you are unsure how to call input_data.read_data_sets, or want to see how it is used in real code, the curated examples below may help. You can also browse further usage examples from the input_data module.


Below are 6 code examples of input_data.read_data_sets, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
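Before the individual examples, here is a minimal usage sketch, assuming the input_data helper module from the classic TensorFlow MNIST tutorials is importable; the "MNIST_data/" directory name is illustrative, not required:

# Minimal sketch: download (if needed) and load MNIST into train/validation/test splits
import input_data

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

print(mnist.train.num_examples)               # size of the training split
images, labels = mnist.train.next_batch(100)  # draw a mini-batch of 100 examples
print(images.shape, labels.shape)             # (100, 784) and (100, 10) with one_hot=True

The examples below follow this same pattern, differing mainly in the data directory and the one_hot flag.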

Example 1: __init__

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def __init__(self):
        self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        self.n_samples = self.mnist.train.num_examples

        self.n_hidden = 500
        self.n_z = 20
        self.batchsize = 100

        self.images = tf.placeholder(tf.float32, [None, 784])
        image_matrix = tf.reshape(self.images, [-1, 28, 28, 1])
        z_mean, z_stddev = self.recognition(image_matrix)
        samples = tf.random_normal([self.batchsize, self.n_z], 0, 1, dtype=tf.float32)
        guessed_z = z_mean + (z_stddev * samples)  # reparameterization trick

        self.generated_images = self.generation(guessed_z)
        generated_flat = tf.reshape(self.generated_images, [self.batchsize, 28*28])

        self.generation_loss = -tf.reduce_sum(self.images * tf.log(1e-8 + generated_flat) + (1-self.images) * tf.log(1e-8 + 1 - generated_flat),1)

        self.latent_loss = 0.5 * tf.reduce_sum(tf.square(z_mean) + tf.square(z_stddev) - tf.log(tf.square(z_stddev)) - 1,1)
        self.cost = tf.reduce_mean(self.generation_loss + self.latent_loss)
        self.optimizer = tf.train.AdamOptimizer(0.001).minimize(self.cost)


    # encoder 
Developer: kvfrans, Project: variational-autoencoder, Lines of code: 27, Source file: main.py

Example 2: read_data_sets

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def read_data_sets():

	basepath = '/'.join(__file__.split('/')[:-1])

	import input_data
	data = input_data.read_data_sets(basepath+'/bin', one_hot=True)

	import os
	import numpy as np
	if not os.path.isfile(basepath+'/bin/permutation.npy'):
		indices = np.random.permutation(28**2)
		os.makedirs(basepath+'/bin', exist_ok=True)
		np.save(basepath+'/bin/permutation.npy', indices)
	else:
		indices = np.load(basepath+'/bin/permutation.npy')

	data.train.images[:,:] = data.train.images[:,indices]
	data.validation.images[:,:] = data.validation.images[:,indices]
	data.test.images[:,:] = data.test.images[:,indices]

	return data 
Developer: jostmey, Project: rwa, Lines of code: 23, Source file: input_data_permuted.py

Example 3: train_and_save_model

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def train_and_save_model(self, data_location, save_location):
        # Our training data
        mnist = input_data.read_data_sets(data_location, one_hot=True)

        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i%100 == 0:
                train_accuracy = self.accuracy.eval(feed_dict={
                    self.x:batch[0], self.y_: batch[1], self.keep_prob: 1.0
                })
                print("step %d, training accuracy %g"%(i, train_accuracy))
            self.train_step.run(feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})

        # Save the trained model (saver and sess are assumed to be defined elsewhere in the class)
        save_path = saver.save(sess, save_location)
        print("Model saved in file: ", save_path)

    # Loads saved model 
Developer: kendricktan, Project: openimif, Lines of code: 20, Source file: imif_digits.py

Example 4: train_and_save_model

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def train_and_save_model(self):
        # Our training data
        mnist = input_data.read_data_sets('../data/MNIST_digits', one_hot=True)

        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i%100 == 0:
                train_accuracy = self.accuracy.eval(feed_dict={
                    self.x:batch[0], self.y_: batch[1], self.keep_prob: 1.0
                })
                print("step %d, training accuracy %g"%(i, train_accuracy))
            self.train_step.run(feed_dict={self.x: batch[0], self.y_: batch[1], self.keep_prob: 0.5})

        # Save the trained model (saver and sess are assumed to be defined elsewhere in the class)
        save_path = saver.save(sess, "../trained_models/mnist_digits.ckpt")
        print("Model saved in file: ", save_path)

    # Loads saved model 
Developer: kendricktan, Project: openimif, Lines of code: 20, Source file: imid_digits.py

Example 5: main

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def main(_):
  # Import data
  mnist = input_data.read_data_sets('data', one_hot=True, validation_size=0)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  W_fc1 = weight_variable([28*28, 1000])
  b_fc1 = bias_variable([1000])
  h_fc1 = tf.nn.relu(tf.matmul(x, W_fc1) + b_fc1)

  W_fc2 = weight_variable([1000, 1000])
  b_fc2 = bias_variable([1000])
  h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)
  
  W_fc3 = weight_variable([1000, 10])
  b_fc3 = bias_variable([10])
  out = tf.matmul(h_fc2, W_fc3) + b_fc3
  
  cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=out))
  train_step = cocob_optimizer.COCOB().minimize(cross_entropy)
  correct_prediction = tf.equal(tf.argmax(out, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(600*40):
      batch = mnist.train.next_batch(100)
      if i % 600 == 0:
        test_batch_size = 10000
        batch_num = int(mnist.train.num_examples / test_batch_size)
        train_loss = 0
    
        for j in range(batch_num):
            train_loss += cross_entropy.eval(feed_dict={x: mnist.train.images[test_batch_size*j:test_batch_size*(j+1), :],
                                              y_: mnist.train.labels[test_batch_size*j:test_batch_size*(j+1), :]})
            
        train_loss /= batch_num

        test_err = 1-accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})

        print('epoch %d, training cost %g, test error %g ' % (i/600, train_loss, test_err))
      train_step.run(feed_dict={x: batch[0], y_: batch[1]}) 
Developer: bremen79, Project: cocob, Lines of code: 48, Source file: mnist_fully_connected.py

Example 6: main

# Required import: import input_data [as alias]
# Or: from input_data import read_data_sets [as alias]
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
    data = mnist.train.next_batch(8000)
    train_x = data[0]
    Y = data[1]
    train_y = (np.arange(np.max(Y) + 1) == Y[:, None]).astype(int)
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
    tb = mnist.train.next_batch(2000)
    Y_test = tb[1]
    X_test = tb[0]
    # 0.00002-92
    # 0.000005-92, 93 when 200000 190500

    d1 = Digit_Recognizer_LR.model(train_x.T, train_y.T, Y, X_test.T, Y_test, num_iters=1500, alpha=0.05,
                                   print_cost=True)
    w_LR = d1["w"]
    b_LR = d1["b"]

    d2 = Digit_Recognizer_NN.model_nn(train_x.T, train_y.T, Y, X_test.T, Y_test, n_h=100, num_iters=1500, alpha=0.05,
                                      print_cost=True)

    dims = [784, 100, 80, 50, 10]
    d3 = Digit_Recognizer_DL.model_DL(train_x.T, train_y.T, Y, X_test.T, Y_test, dims, alpha=0.5, num_iterations=1100,
                                      print_cost=True)

    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        ret, img = cap.read()
        img, contours, thresh = get_img_contour_thresh(img)
        ans1 = ''
        ans2 = ''
        ans3 = ''
        if len(contours) > 0:
            contour = max(contours, key=cv2.contourArea)
            if cv2.contourArea(contour) > 2500:
                # print(predict(w_from_model,b_from_model,contour))
                x, y, w, h = cv2.boundingRect(contour)
                # newImage = thresh[y - 15:y + h + 15, x - 15:x + w +15]
                newImage = thresh[y:y + h, x:x + w]
                newImage = cv2.resize(newImage, (28, 28))
                newImage = np.array(newImage)
                newImage = newImage.flatten()
                newImage = newImage.reshape(newImage.shape[0], 1)
                ans1 = Digit_Recognizer_LR.predict(w_LR, b_LR, newImage)
                ans2 = Digit_Recognizer_NN.predict_nn(d2, newImage)
                ans3 = Digit_Recognizer_DL.predict(d3, newImage)

        x, y, w, h = 0, 0, 300, 300
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(img, "Logistic Regression : " + str(ans1), (10, 320),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "Shallow Network :  " + str(ans2), (10, 350),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, "Deep Network :  " + str(ans3), (10, 380),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.imshow("Frame", img)
        cv2.imshow("Contours", thresh)
        k = cv2.waitKey(10)
        if k == 27:
            break 
Developer: akshaybahadur21, Project: Digit-Recognizer, Lines of code: 63, Source file: Digit-Recognizer.py


Note: The input_data.read_data_sets method examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please refer to each project's license before distributing or using the code. Do not reproduce without permission.