

Python DataSet.next_batch Method Code Examples

This article collects typical usage examples of the dataset.DataSet.next_batch method in Python. If you have been wondering how DataSet.next_batch is used in practice, or looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of dataset.DataSet, the class this method belongs to.


Four code examples of the DataSet.next_batch method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
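All four examples assume a DataSet class whose next_batch(batch_size) returns the next (images, labels) slice of the training data. The actual dataset module is not reproduced on this page, so the following is only a minimal sketch of such a class; the constructor arguments and the shuffle-on-wraparound behavior are assumptions.

import numpy as np

class DataSet(object):
    """Minimal sketch; the real dataset.DataSet is not shown on this page."""
    def __init__(self, images, labels):
        self._images = np.asarray(images)
        self._labels = np.asarray(labels)
        self._index = 0

    def next_batch(self, batch_size):
        # Return the next batch_size examples; reshuffle and wrap around
        # once the current epoch is exhausted (assumed behavior).
        if self._index + batch_size > len(self._images):
            perm = np.random.permutation(len(self._images))
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            self._index = 0
        start = self._index
        self._index += batch_size
        return self._images[start:self._index], self._labels[start:self._index]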

Example 1: LeNet

# Required import: from dataset import DataSet [as alias]
# Or: from dataset.DataSet import next_batch [as alias]

#......... part of the code is omitted here .........
        trainable=True, name='biases')
      pool2_flat = tf.reshape(self.pool2, [-1, shape])
      fc1l = tf.nn.bias_add(tf.matmul(pool2_flat, fc1w), fc1b)
      self.fc1 = tf.nn.relu(fc1l)
      self.dropout1 = tf.nn.dropout(self.fc1, keep_prob=self.keep_prob, name='dropout1')
      self.parameters += [fc1w, fc1b]

    # fc2
    with tf.name_scope('fc2') as scope:
      fc2w = tf.Variable(tf.random_normal([128, 10],
        dtype=tf.float32, stddev=1e-1), name='weights')
      fc2b = tf.Variable(tf.constant(1.0, shape=[10], dtype=tf.float32),
        trainable=True, name='biases')
      fc2l = tf.nn.bias_add(tf.matmul(self.dropout1, fc2w), fc2b)
      self.logits = fc2l  # raw logits: softmax_cross_entropy_with_logits applies softmax itself
      self.parameters += [fc2w, fc2b]

  def load_weights(self, weights, sess):
    # Not implemented in this example.
    pass

  def train(self, learning_rate, training_epochs, batch_size, keep_prob):
    # Load dataset for training and testing
    self.dataset = DataSet()

    # Define size of output
    self.Y = tf.placeholder(tf.float32, [None, 10], name='Y')
    # Define cost function
    self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
    # Define optimization method
    self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)

    # Start logger
    if self.log:
        tf.summary.scalar('cost', self.cost)
        self.merged = tf.summary.merge_all()
        self.train_writer = tf.summary.FileWriter('./log_train', self.sess.graph)

    self.sess.run(tf.global_variables_initializer())
    self.sess.run(tf.local_variables_initializer())

    print('Training...')
    weights = []
    # For each epoch, feed training data and perform updating parameters
    for epoch in range(training_epochs):
        avg_cost = 0
        # Number of batches = size of training set / batch_size
        total_batch = int(self.dataset.get_train_set_size() / batch_size)

        # For each batch 
        for i in range(total_batch + 1):
            # Get next batch to feed to the network
            batch_xs, batch_ys = self.dataset.next_batch(batch_size)
            feed_dict = {
                self.X: batch_xs.reshape([batch_xs.shape[0], 28, 28, 1]),
                self.Y: batch_ys,
                self.keep_prob: keep_prob
            }

            # self.merged only exists when logging is enabled, so only
            # fetch the summary op in that case
            if self.log:
                weights, summary, c, _ = self.sess.run(
                    [self.parameters, self.merged, self.cost, self.optimizer],
                    feed_dict=feed_dict)
            else:
                weights, c, _ = self.sess.run(
                    [self.parameters, self.cost, self.optimizer],
                    feed_dict=feed_dict)
            avg_cost += c / total_batch

        if self.log:
            self.train_writer.add_summary(summary, epoch + 1)

        print('Epoch:', '%02d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))

    print('Training finished!')

    saver = tf.train.Saver()
    # model_dir is assumed to be defined in the code omitted above
    save_path = saver.save(self.sess, model_dir + "/mnist_lenet.ckpt")
    print("Trained model is saved in file: %s" % save_path)

  def evaluate(self, batch_size, keep_prob):

    self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1))
    self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))

    N = self.dataset.get_test_set_size()
    print('test set size:', N)
    correct_sample = 0
    for i in range(0, N, batch_size):
        batch_xs, batch_ys = self.dataset.next_batch_test(batch_size)

        N_batch = batch_xs.shape[0]

        feed_dict = {
            self.X: batch_xs.reshape([N_batch, 28, 28, 1]),
            self.Y: batch_ys,
            self.keep_prob: keep_prob
        }

        correct = self.sess.run(self.accuracy, feed_dict=feed_dict)
        correct_sample += correct * N_batch

    test_accuracy = correct_sample / N

    print("\nAccuracy Evaluates")
    print("-" * 30)
    print('Test Accuracy:', test_accuracy)
Developer: vuamitom, Project: Code-Exercises, Lines: 104, Source: train.py
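The top of the LeNet class, including __init__ and the convolutional layers, is omitted above, so the constructor signature is not shown. Assuming a no-argument constructor that builds the graph and the session, usage might look like the following (all argument values are hypothetical):

# Hypothetical usage of the LeNet class above; the constructor signature
# is an assumption, since that part of the code is omitted.
model = LeNet()
model.train(learning_rate=1e-4, training_epochs=15, batch_size=100, keep_prob=0.7)
model.evaluate(batch_size=100, keep_prob=1.0)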

Example 2: list

# Required import: from dataset import DataSet [as alias]
# Or: from dataset.DataSet import next_batch [as alias]
learningRate = 0.00003
trainStep = tf.train.AdamOptimizer(learningRate).minimize(crossEntropy)

# Define the evaluation function.
correctPrediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction, "float"))

sess.run(tf.initialize_all_variables())
accuracyTrain = list()
accuracyTest = list()

# Train.
start = time.time()
print("{}: Start training.".format(start))
for i in range(10000):
    batchXs, batchYs = trainData.next_batch(256)
    trainStep.run(feed_dict = {x: batchXs, y_: batchYs})
    if i%100 == 0:
        batchXs, batchYs = trainData.next_batch(4096)
        accuracyTrain.append(sess.run(accuracy, feed_dict={x: batchXs, y_: batchYs}))
        batchXs, batchYs = testData.next_batch(4096)
        accuracyTest.append(sess.run(accuracy, feed_dict={x: batchXs, y_: batchYs}))
        # Decay learning rate over time. Doesn't work with AdamOptimizer...
        #learningRate *= 0.99
        #trainStep = tf.train.AdamOptimizer(learningRate).minimize(crossEntropy)
end = time.time()
print("{}: Stop training.".format(end))
valueW = sess.run(wOut)
print(valueW)
valueB = sess.run(bOut)
print(valueB)
Developer: herckens, Project: ecg_analyzer, Lines: 33, Source: plot_data.py
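The commented-out decay in this example does not work because it rebuilds the optimizer every 100 steps, which discards Adam's accumulated moment estimates. A minimal sketch of the usual TF 1.x alternative, feeding the optimizer a decaying learning-rate tensor (the schedule values are assumptions):

# Sketch: decay the learning rate through a tensor instead of rebuilding
# the optimizer, so Adam's internal state is preserved (TF 1.x API).
globalStep = tf.Variable(0, trainable=False)
decayedRate = tf.train.exponential_decay(
    learning_rate=0.00003,
    global_step=globalStep,
    decay_steps=1000,   # hypothetical schedule
    decay_rate=0.99)
trainStep = tf.train.AdamOptimizer(decayedRate).minimize(crossEntropy, global_step=globalStep)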

Example 3: main

# Required import: from dataset import DataSet [as alias]
# Or: from dataset.DataSet import next_batch [as alias]
def main():

	#Import and format data into tensors.
	dir_json = 'data_set_1000.json'
	data = json.loads(open(dir_json).read())

	training_x = []
	testing_x = []

	training_y = []
	testing_y = []

	MAX_311 = 285

	for point in data:
		if (point['index'] % 2 == 0):
			append_vector(testing_x, testing_y, point)
		else:
			append_vector(training_x, training_y, point)

	train_data = DataSet(training_x, training_y)

	train_len = len(training_x)
	test_len = len(testing_x)
	batch_size = 400
	steps = 16000

	#Create session.
	sess = tf.InteractiveSession()

	#Input placeholders.
	x = tf.placeholder(tf.float32, [None, 10])
	y_expected = tf.placeholder(tf.float32, shape=[None, MAX_311])

	#Neural network hidden layers and variables.
	W1 = tf.Variable(tf.random_normal([10, 500]))
	b1 = tf.Variable(tf.constant(0.0, shape=[500]))
	h1 = tf.nn.relu(tf.matmul(x, W1) + b1)

	W2 = tf.Variable(tf.random_normal([500, 500]))
	b2 = tf.Variable(tf.constant(0.0, shape=[500]))
	h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)

	W3 = tf.Variable(tf.random_normal([500, 1000]))
	b3 = tf.Variable(tf.constant(0.0, shape=[1000]))
	h3 = tf.nn.relu(tf.matmul(h2, W3) + b3)

	h3 = tf.nn.dropout(h3, 0.5)

	#Neural network output.
	W4 = tf.Variable(tf.random_normal([1000, MAX_311]))
	b4 = tf.Variable(tf.constant(0.0, shape=[MAX_311]))
	y = tf.matmul(h3, W4) + b4  # raw logits; softmax is applied inside the cross-entropy loss

	#Error measure.
	cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_expected))

	#Minimize cross_entropy with gradient descent.
	train_step = tf.train.GradientDescentOptimizer(.00001).minimize(cross_entropy)

	#Calculate error.
	correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_expected, 1))
	error = 1 - tf.reduce_mean(tf.cast(correct_prediction, "float"))

	#Train.
	saver = tf.train.Saver()
	ces = [0]*(train_len * steps / batch_size)
	errors = [0]*(((train_len / batch_size + 10) * steps) / 1)

	full_set = {x : training_x, y_expected : training_y}

	sess.run(tf.initialize_all_variables())
	validation = {x : testing_x, y_expected : testing_y}
	for i in range(steps):
		for j in range(train_len / batch_size):
			batch_xs, batch_ys = train_data.next_batch(batch_size)
			feed = {x : batch_xs, y_expected : batch_ys}
			train_step.run(feed_dict=feed)
			ce = cross_entropy.eval(feed_dict=feed)
			ces[((train_len / batch_size)* i)+j] = ce
			if (j % 10 == 0):
				e = error.eval(feed_dict=validation)
				errors[(train_len * i / (10 * batch_size)) + (j / 10)] = e
		print "Epoch " + str(i)

	print "Test data accuracy:"
	print 1 - error.eval(feed_dict=validation)

	temp = 0

	for a in xrange(len(ces)):
		if ces[a] != 0:
			temp = a

	ces = ces[0:temp]
	temp = 0

	for a in xrange(len(errors)):
		if errors[a] != 0:
			temp = a
#......... part of the code is omitted here .........
Developer: ricardocdlr, Project: SF_crime, Lines: 103, Source: MLP_311.py
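Note that Example 3 is Python 2 code: it relies on print statements, xrange, and the integer-division behavior of /. As a minimal sketch, the inner training loop would read as follows under Python 3, with // for the integer divisions (names as in the example above):

# Python 3 rendering of Example 3's training loop (sketch)
for i in range(steps):
    for j in range(train_len // batch_size):
        batch_xs, batch_ys = train_data.next_batch(batch_size)
        feed = {x: batch_xs, y_expected: batch_ys}
        train_step.run(feed_dict=feed)
    print("Epoch " + str(i))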

Example 4: Model

# Required import: from dataset import DataSet [as alias]
# Or: from dataset.DataSet import next_batch [as alias]
class Model(object):
    def __init__(self, config):
        self.config = config
        self.data = DataSet(self.config)
        self.add_placeholders()
        self.summarizer = tf.summary
        self.net = Network(config)
        self.saver = tf.train.Saver()
        self.epoch_count, self.second_epoch_count = 0, 0
        self.outputs, self.prob = self.net.neural_search()
        self.hyperparams = self.net.gen_hyperparams(self.outputs)
        self.hype_list = [1 for i in range(self.config.hyperparams)] #[7, 7, 24, 5, 5, 36, 3, 3, 48, 64]
        self.reinforce_loss = self.net.REINFORCE(self.prob)
        self.tr_cont_step = self.net.train_controller(self.reinforce_loss, self.val_accuracy)
        self.cNet, self.y_pred = self.init_child(self.hype_list)
        self.cross_loss, self.accuracy, self.tr_model_step = self.grow_child()
        self.init = tf.global_variables_initializer()
        self.local_init = tf.local_variables_initializer()

    def add_placeholders(self):
        self.X = tf.placeholder(tf.float32, shape=[None, 3072])
        self.Y = tf.placeholder(tf.float32, shape=[None, 10])
        self.val_accuracy = tf.placeholder(tf.float32)
        self.keep_prob = tf.placeholder(tf.float32)

    def init_child(self, hype_list):
        cNet = ChildNetwork(self.config, hype_list)
        y_pred = cNet.run_model(self.X, self.keep_prob)
        return cNet, y_pred

    def grow_child(self):
        self.cross_loss = self.cNet.model_loss(self.y_pred, self.Y)
        self.accuracy = self.cNet.accuracy(self.y_pred, self.Y)
        self.tr_model_step = self.cNet.train_model(self.cross_loss)
        return self.cross_loss, self.accuracy, self.tr_model_step

    def run_model_epoch(self, sess, data, summarizer, epoch):
        X, Y, i, err = None, None, 0, list()
        merged_summary = self.summarizer.merge_all()
        for X, Y, tot in self.data.next_batch(data):
            feed_dict = {self.X : X, self.Y : Y, self.keep_prob : self.config.solver.dropout}
            loss, _ = sess.run([self.cross_loss, self.tr_model_step], feed_dict=feed_dict)
            output = "Epoch ({}-{}) Batch({}) : Loss = {}".format(self.epoch_count, self.second_epoch_count, i , loss)
            with open("../stdout/train.log", "a+") as log:
                log.write(output + "\n")
            print("   {}".format(output), end='\r')
            step = int(epoch*tot + i)
            err.append(loss) 
            #summarizer.add_summary(summ, step)
            i += 1
        err = np.asarray(err)
        return np.mean(err), step

    def run_model_eval(self, sess, data="validation", summary_writer=None, step=0):
        y, y_pred, loss_, loss, i, acc, accuracy = list(), list(), 0.0, 0.0, 0, 0.0, list()
        merged_summary = self.summarizer.merge_all()
        for X, Y, tot in self.data.next_batch(data):
            feed_dict = {self.X: X, self.Y: Y, self.keep_prob: 1.0}
            loss_, acc =  sess.run([self.cross_loss, self.accuracy], feed_dict=feed_dict)
            #summary_writer.add_summary(summ, step)
            loss += loss_
            accuracy.append(acc)
            i += 1
        accuracy.sort()
        return loss / i, accuracy # Reward = cube(last 5 validation accuracy)

    def add_summaries(self, sess):
        if self.config.load:
            path_ = "../results/tensorboard"
        else :
            path_ = "../bin/results/tensorboard"
        summary_writer_train = tf.summary.FileWriter(path_ + "/train", sess.graph)
        summary_writer_val = tf.summary.FileWriter(path_ + "/val", sess.graph)
        summary_writer_test = tf.summary.FileWriter(path_+ "/test", sess.graph)
        summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
        return summary_writers

    def fit(self, sess, summarizer):
        sess.run(self.init)
        sess.run(self.local_init)
        max_epochs = self.config.max_epochs
        self.epoch_count, val_accuracy, reward = 0, 0.0, 1.0
        while self.epoch_count < max_epochs:
            # Creation of new Child Network from new Hyperparameters
            self.hype_list = sess.run(self.hyperparams)
            hyperfoo = {
                "Filter Row 1": self.hype_list[0], "Filter Column 1": self.hype_list[1],
                "No Filter 1": self.hype_list[2], "Filter Row 2": self.hype_list[3],
                "Filter Column 2": self.hype_list[4], "No Filter 2": self.hype_list[5],
                "Filter Row 3": self.hype_list[6], "Filter Column 3": self.hype_list[7],
                "No Filter 3": self.hype_list[8], "No Neurons": self.hype_list[9]
            }
            output = ""
            for key in hyperfoo:
                output += "{} : {}\n".format(key, hyperfoo[key])
            with open("../stdout/hyperparams.log", "a+") as f:
                f.write(output + "\n\n")
            print(sess.run(self.outputs))
            print(output + "\n")
            self.second_epoch_count = 0
            while self.second_epoch_count < max_epochs :
                average_loss, tr_step = self.run_model_epoch(sess, "train", summarizer['train'], self.second_epoch_count)
                if not self.config.debug:
                    val_loss, val_accuracy = self.run_model_eval(sess, "validation", summarizer['val'], tr_step)
                    reward = sum(val_accuracy[-5:]) ** 3
                    output =  "=> Training : Loss = {:.3f} | Validation : Loss = {:.3f}, Accuracy : {:.3f}".format(average_loss, val_loss, val_accuracy[-1])
#......... part of the code is omitted here .........
Developer: DanielLSM, Project: Neural-Architecture-Search-with-RL, Lines: 103, Source: __main__.py
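Unlike Examples 1-3, where next_batch(batch_size) returns a single batch, Example 4 iterates over self.data.next_batch(data) as a generator that takes a split name and yields (X, Y, total_batches) triples. A sketch of such a generator follows; the _splits attribute and the batch-size lookup are assumptions:

# Sketch of a generator-style next_batch as Example 4 appears to use it;
# the _splits attribute and config.batch_size are assumptions.
def next_batch(self, split):
    images, labels = self._splits[split]  # e.g. "train" or "validation"
    batch_size = self.config.batch_size
    total = len(images) // batch_size
    for start in range(0, total * batch_size, batch_size):
        yield images[start:start + batch_size], labels[start:start + batch_size], total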


Note: The dataset.DataSet.next_batch examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright of the source code belongs to the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.