This article collects typical usage examples of the tensorflow.argmax function in Python. If you have been wondering what exactly argmax does, how to call it, or what real-world usage looks like, the hand-picked code samples here should help.
The section below presents 15 code examples of the argmax function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system surface better Python code samples.
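Before diving into the examples, here is a minimal standalone sketch (not drawn from any of the repositories below) of what tf.argmax computes: given a tensor and an axis, it returns the index of the largest value along that axis, which is why it appears in almost every accuracy calculation that follows.
import tensorflow as tf

logits = tf.constant([[0.1, 0.7, 0.2],
                      [0.8, 0.1, 0.1]])
# Index of the largest entry in each row
predicted = tf.argmax(logits, axis=1)
with tf.Session() as sess:
    print(sess.run(predicted))  # => [1 0]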
Example 1: eval
def eval(self, data, label, lens):
predictions = []
vals = []
for i in range(data.shape[0] // self.batch_size):
D = data[range(self.batch_size*i,self.batch_size*(i+1))]
L = label[range(self.batch_size*i,self.batch_size*(i+1))]
if lens is not None:
l = lens[range(self.batch_size*i,self.batch_size*(i+1))]
feed_dict={self.dataset:D, self.labels:L, self.lengths:l}
else:
feed_dict={self.dataset:D, self.labels:L}
predictions.extend(self.sess.run(self.correct_prediction, feed_dict))
vals.extend(self.sess.run(tf.argmax(self.logits,1), feed_dict))
## Handle the leftover samples that do not fill a full batch
last_chunk = self.batch_size*(i+1)
gap = self.batch_size - (data.shape[0] - last_chunk)
D = np.pad(data[last_chunk:], ((0,gap),(0,0)), mode='constant', constant_values=0)
L = np.pad(label[last_chunk:], ((0,gap),(0,0)), mode='constant', constant_values=0)
if lens is not None:
l = np.pad(lens[last_chunk:], (0,gap), mode='constant', constant_values=0)
feed_dict={self.dataset:D, self.labels:L, self.lengths:l}
else:
feed_dict={self.dataset:D, self.labels:L}
predictions.extend(self.sess.run(self.correct_prediction, feed_dict)[:self.batch_size - gap])
vals.extend(self.sess.run(tf.argmax(self.logits,1), feed_dict)[:self.batch_size - gap])
print(vals)
## PRINT THE PREDICTIONS
return 100.0*sum(predictions)/len(predictions)
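One caveat worth flagging in Example 1 (an observation, not a change from the original repo): calling sess.run(tf.argmax(self.logits, 1), ...) inside the loop creates a brand-new argmax op on every iteration, so the graph grows with each evaluated batch. A minimal sketch of the usual fix builds the op once and reuses it; argmax_op is a hypothetical attribute name:
# Hypothetical refactor: build the op once (e.g. in __init__) ...
self.argmax_op = tf.argmax(self.logits, 1)
# ... then inside eval(), run the prebuilt op for every batch:
vals.extend(self.sess.run(self.argmax_op, feed_dict))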
Example 2: accuracy
def accuracy(self):
if self._accuracy is None:
with tf.variable_scope('accuracy'):
correct_predictions = tf.equal(tf.argmax(self.inference, axis=1),
tf.argmax(tf.one_hot(self.targets, depth=self.n_classes), axis=1))
self._accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
return self._accuracy
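Example 2 one-hot encodes the integer targets only to argmax them straight back. A minimal equivalent sketch (assuming self.targets holds integer class ids) skips the round trip; note that tf.argmax returns int64, so the targets are cast to match:
correct_predictions = tf.equal(tf.argmax(self.inference, axis=1),
                               tf.cast(self.targets, tf.int64))
self._accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))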
Example 3: __init__
def __init__(self):
# Import data
error = None
for _ in range(10):
try:
self.mnist = input_data.read_data_sets(
"/tmp/tensorflow/mnist/input_data", one_hot=True)
error = None
break
except Exception as e:
error = e
time.sleep(5)
if error:
raise ValueError("Failed to import data", error)
# Set seed and build layers
tf.set_random_seed(0)
self.x = tf.placeholder(tf.float32, [None, 784], name="x")
self.y_ = tf.placeholder(tf.float32, [None, 10], name="y_")
y_conv, self.keep_prob = deepnn(self.x)
# Need to define loss and optimizer attributes
self.loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=self.y_, logits=y_conv))
self.optimizer = tf.train.AdamOptimizer(1e-4)
self.variables = ray_tf_utils.TensorFlowVariables(
self.loss, tf.get_default_session())
# For evaluating test accuracy
correct_prediction = tf.equal(
tf.argmax(y_conv, 1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Example 4: __init__
def __init__(self, learning_rate=0.001):
# Tracks how many training steps have run
self.global_step = tf.Variable(0, trainable=False)
# Learning rate
self.learning_rate = learning_rate
# Input tensor: each image flattened into a vector of 28 * 28 = 784 pixels
self.x = tf.placeholder(tf.float32, [None, 784])
# Label, i.e. the digit the image shows; if the digit is 8, the label is [0,0,0,0,0,0,0,0,1,0]
# This encoding is called one-hot
# The label is a length-10 vector; the index of its largest value is the digit written in the image
self.label = tf.placeholder(tf.float32, [None, 10])
# Weights, initialized from a normal distribution
self.w = tf.Variable(tf.random_normal([784, 10]))
# Bias, initialized from a normal distribution
self.b = tf.Variable(tf.random_normal([10]))
# Output: y = softmax(x * w + b)
self.y = tf.nn.softmax(tf.matmul(self.x, self.w) + self.b)
# Loss: cross-entropy, the most common measure of the gap between the label and the output y
self.loss = - tf.reduce_sum(self.label * tf.log(self.y + 1e-10))
# Backpropagation via gradient descent: adjust w and b so the loss shrinks
# The smaller the loss, the closer y is to the label, and the higher the accuracy
# minimize accepts a global_step argument and increments it by 1 on every training step,
# so evaluating self.global_step tells you how many steps have been trained
self.train = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step)
# The code below is used when checking accuracy
# argmax returns the index of the largest value, which is the predicted answer
# e.g. [0,0,0,0.9,0,0.1,0,0,0,0] represents the digit 3
predict = tf.equal(tf.argmax(self.label, 1), tf.argmax(self.y, 1))
# predict -> [true, true, true, false, false, true]
# reduce_mean averages predict, i.e. correct count / total count, which is the accuracy
self.accuracy = tf.reduce_mean(tf.cast(predict, dtype=tf.float32))
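To make the argmax comments above concrete, a tiny standalone sketch (not part of the class) that evaluates the exact vector from the comment:
import tensorflow as tf

label = tf.constant([[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.]])
y = tf.constant([[0., 0., 0., 0.9, 0., 0.1, 0., 0., 0., 0.]])
with tf.Session() as sess:
    print(sess.run(tf.argmax(y, 1)))  # => [3], the predicted digit
    print(sess.run(tf.equal(tf.argmax(label, 1), tf.argmax(y, 1))))  # => [ True]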
Example 5: cnn_setup
def cnn_setup(x, y, keep_prob, lr, stddev):
first_hidden = 32
second_hidden = 64
fc_hidden = 1024
W_conv1 = weight([5, 5, 1, first_hidden], stddev)
B_conv1 = bias([first_hidden])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + B_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight([5, 5, first_hidden, second_hidden], stddev)
b_conv2 = bias([second_hidden])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight([7 * 7 * second_hidden, fc_hidden], stddev)
b_fc1 = bias([fc_hidden])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * second_hidden])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
W_fc2 = weight([fc_hidden, 10], stddev)
b_fc2 = bias([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(
-tf.reduce_sum(y * tf.log(y_conv), reduction_indices=[1]))
correct_pred = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
return (tf.train.AdamOptimizer(lr).minimize(cross_entropy),
tf.reduce_mean(tf.cast(correct_pred, tf.float32)), cross_entropy)
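Example 5 (like Examples 11 and 15 below) applies tf.log to a softmax output with no epsilon guard, which yields NaN as soon as any probability underflows to zero. A minimal sketch of the numerically stable alternative, the same pattern Example 12 uses, keeps the raw logits and lets TensorFlow fuse the softmax and the log:
# Hypothetical variant of the loss above: feed unscaled logits, not the softmax output
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
# argmax is unaffected, since softmax preserves the ordering of the logits
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))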
Example 6: build
def build(self, configuration):
tf.reset_default_graph()
# --- specify input data
self.inputs = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x')
self.labels = tf.placeholder(tf.float32, [None, 10], name='labels')
# tf.summary.image('input', inputs, 3)
# TODO add name scopes and summaries
# --- specify layers of network
# TODO try another strides for conv layer
# TODO try to get rid of pooling layer
conv1 = tf.layers.conv2d(inputs=self.inputs, filters=configuration[0], kernel_size=[5, 5], padding="same",
activation=tf.nn.relu, name='conv1')
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, name='pool1')
conv2 = tf.layers.conv2d(inputs=pool1, filters=configuration[1], kernel_size=[5, 5], padding="same",
activation=tf.nn.relu, name='conv2')
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, name='pool2')
flattened = tf.reshape(pool2, [-1, 7 * 7 * configuration[1]])
dense = tf.layers.dense(inputs=flattened, units=1024, activation=tf.nn.relu, name='fc')
logits = tf.layers.dense(inputs=dense, units=10, name='output')
# --- specify cost function and how training is performed
with tf.name_scope("train"):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=logits)
self.train_step = tf.train.AdamOptimizer(0.015).minimize(cross_entropy)
# --- specify function to calculate accuracy
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(self.labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("accuracy", self.accuracy)
self.summary = tf.summary.merge_all()
Example 7: train
def train(self, eval_on_test=False):
""" Train model and save it to file.
Train model with given hidden layers. Training data is created
by prepare_training_data(), which must be called before this function.
"""
tf.reset_default_graph()
with tf.Session() as sess:
feature_data = tf.placeholder("float", [None, self.num_predictors])
labels = tf.placeholder("float", [None, self.num_classes])
layers = [self.num_predictors] + self.hidden_layers + [self.num_classes]
model = self.inference(feature_data, layers)
cost, cost_summary_op = self.loss(model, labels)
training_op = self.training(cost, learning_rate=0.0001)
correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Merge all variable summaries and save the results to log file
# summary_op = tf.merge_all_summaries()
accuracy_op_train = tf.scalar_summary("Accuracy on Train", accuracy)
summary_op_train = tf.merge_summary([cost_summary_op, accuracy_op_train])
if eval_on_test:
accuracy_op_test = tf.scalar_summary("Accuracy on Test", accuracy)
summary_op_test = tf.merge_summary([accuracy_op_test])
summary_writer = tf.train.SummaryWriter(self.log_dir + self.model_name, sess.graph)
train_dict = {
feature_data: self.training_predictors_tf.values,
labels: self.training_classes_tf.values.reshape(len(self.training_classes_tf.values), self.num_classes)}
if eval_on_test:
test_dict = {
feature_data: self.test_predictors_tf.values,
labels: self.test_classes_tf.values.reshape(len(self.test_classes_tf.values), self.num_classes)}
init = tf.initialize_all_variables()
sess.run(init)
for i in range(1, self.max_iteration):
sess.run(training_op, feed_dict=train_dict)
# Write summary to log
if i % 100 == 0:
summary_str = sess.run(summary_op_train, feed_dict=train_dict)
summary_writer.add_summary(summary_str, i)
if eval_on_test:
summary_str = sess.run(summary_op_test, feed_dict=test_dict)
summary_writer.add_summary(summary_str, i)
summary_writer.flush()
# Print current accuracy to console
if i % 5000 == 0:
print(i, sess.run(accuracy, feed_dict=train_dict))
# Save trained parameters
saver = tf.train.Saver()
saver.save(sess, self.model_filename)
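Example 7 is written against pre-1.0 summary APIs (tf.scalar_summary, tf.merge_summary, tf.train.SummaryWriter, tf.initialize_all_variables). A sketch of the TF >= 1.0 renames, assuming no other changes to the surrounding code:
# Hypothetical TF >= 1.0 equivalents (summary names with spaces get sanitized, hence underscores)
accuracy_op_train = tf.summary.scalar("Accuracy_on_Train", accuracy)
summary_op_train = tf.summary.merge([cost_summary_op, accuracy_op_train])
summary_writer = tf.summary.FileWriter(self.log_dir + self.model_name, sess.graph)
init = tf.global_variables_initializer()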
Example 8: train_neural_network
def train_neural_network(X, Y):
predict = neural_network(X)
cost_func = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=predict))
optimizer = tf.train.AdamOptimizer().minimize(cost_func)  # the default learning rate is 0.001
epochs = 13
with tf.Session() as session:
session.run(tf.global_variables_initializer())
random.shuffle(train_dataset)
train_x = train_dataset[:, 0]
train_y = train_dataset[:, 1]
for epoch in range(epochs):
epoch_loss = 0  # reset per epoch
i = 0  # reset the batch cursor per epoch; originally set once outside the loop, so epochs after the first trained on no batches
while i < len(train_x):
start = i
end = i + batch_size
batch_x = train_x[start:end]
batch_y = train_y[start:end]
_, c = session.run([optimizer, cost_func], feed_dict={X: list(batch_x), Y: list(batch_y)})
epoch_loss += c
i += batch_size
print(epoch, ' : ', epoch_loss)
test_x = test_dataset[:, 0]
test_y = test_dataset[:, 1]
correct = tf.equal(tf.argmax(predict, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy: ', accuracy.eval({X: list(test_x), Y: list(test_y)}))
Example 9: __graph__
def __graph__():
"""Building the inference graph"""
with tf.name_scope('input'):
# [BATCH_SIZE, NUM_FEATURES]
x_input = tf.placeholder(dtype=tf.float32, shape=[None, self.num_features], name='x_input')
# [BATCH_SIZE]
y_input = tf.placeholder(dtype=tf.uint8, shape=[None], name='y_input')
# [BATCH_SIZE, NUM_CLASSES]
y_onehot = tf.one_hot(indices=y_input, depth=self.num_classes, on_value=1, off_value=-1,
name='y_onehot')
learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
with tf.name_scope('training_ops'):
with tf.name_scope('weights'):
weight = tf.get_variable(name='weights',
initializer=tf.random_normal([self.num_features, self.num_classes],
stddev=0.01))
self.variable_summaries(weight)
with tf.name_scope('biases'):
bias = tf.get_variable(name='biases', initializer=tf.constant([0.1], shape=[self.num_classes]))
self.variable_summaries(bias)
with tf.name_scope('Wx_plus_b'):
output = tf.matmul(x_input, weight) + bias
tf.summary.histogram('pre-activations', output)
with tf.name_scope('svm'):
regularization = tf.reduce_mean(tf.square(weight))
hinge_loss = tf.reduce_mean(tf.square(tf.maximum(tf.zeros([self.batch_size, self.num_classes]),
1 - tf.cast(y_onehot, tf.float32) * output)))
with tf.name_scope('loss'):
loss = regularization + self.svm_c * hinge_loss
tf.summary.scalar('loss', loss)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
with tf.name_scope('accuracy'):
predicted_class = tf.sign(output)
predicted_class = tf.identity(predicted_class, name='prediction')
with tf.name_scope('correct_prediction'):
correct = tf.equal(tf.argmax(predicted_class, 1), tf.argmax(y_onehot, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
self.x_input = x_input
self.y_input = y_input
self.y_onehot = y_onehot
self.learning_rate = learning_rate
self.loss = loss
self.optimizer = optimizer
self.output = output
self.predicted_class = predicted_class
self.accuracy = accuracy
self.merged = merged
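A note on the accuracy block in Example 9: predicted_class = tf.sign(output) collapses every margin to -1, 0, or +1, so tf.argmax(predicted_class, 1) ties whenever more than one class has a positive margin and silently picks the first. A minimal sketch of the more conventional multiclass decision rule argmaxes the raw margins instead:
# Hypothetical variant: the class with the largest raw margin wins outright
predicted_label = tf.argmax(output, 1)
correct = tf.equal(predicted_label, tf.argmax(y_onehot, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))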
Example 10: tf_format_mnist_images
def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1]) # indices of correctly recognised images
incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
everything_incorrect_first = tf.concat([incorrectly_recognised_indices, correctly_recognised_indices], 0) # images reordered with indices of unrecognised images first
everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
# compute n=100 digits to display only
Xs = tf.gather(X, everything_incorrect_first)
Ys = tf.gather(Y, everything_incorrect_first)
Ys_ = tf.gather(Y_, everything_incorrect_first)
correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)
digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
#superimposed_digits = correct_tags+computed_tags
superimposed_digits = tf.where(correct_prediction_s, tf.zeros_like(correct_tags), correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
correct_bkg = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
recognised_bkg = tf.gather(tf.concat([incorrect_bkg, correct_bkg], 0), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status
I = tf.image.grayscale_to_rgb(Xs)
I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
Islices = [] # 100 images => 10x10 image block
for imslice in range(lines):
Islices.append(tf.concat(tf.unstack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3])), 1))
I = tf.concat(Islices, 0)
return I
Example 11: train_a_teacher_network
def train_a_teacher_network():
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1,28,28,1])
net = ops.conv2d(x_image, 32, [5, 5], scope='conv1', stddev=0.1, bias=0.1)
net = ops.max_pool(net, [2, 2], scope='pool1')
net = ops.conv2d(net, 64, [5, 5], scope='conv2', stddev=0.1, bias=0.1)
net = ops.max_pool(net, [2, 2], scope='pool2')
net = ops.flatten(net, scope='pool2_flat')
net = ops.fc(net, 1024, scope='fc1', stddev=0.1, bias=0.1)
net = ops.fc(net, 10, activation=None, scope='fc2', stddev=0.1, bias=0.1)
y_conv = tf.nn.softmax(net)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
model = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('loss', cross_entropy)
tf.summary.scalar('acc', accuracy)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
writer = tf.summary.FileWriter('./logs', sess.graph)
sess.run(tf.global_variables_initializer())
print('Teacher Network...')
for i in range(MAX_ITER):
batch = mnist.train.next_batch(BATCH_SIZE)
sess.run(model, feed_dict={x: batch[0], y_: batch[1]})
# saver.save(sess, './my-model', global_step=TEST_ITER)
if i % 100 == 0:
summary_str, acc = sess.run([merged, accuracy], feed_dict={x: mnist.test.images, y_: mnist.test.labels})
writer.add_summary(summary_str, i)
print('[Iter: {}] Validation Accuracy : {:.4f}'.format(i,acc))
saver.save(sess, './my-model', global_step=TEST_ITER)
Example 12: main
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
Example 13: train
def train(args):
device = args.device
load_path = args.load_path
# load data
train_data = load_data('train')
val_data = load_data('validation')
# load model
with tf.device('/gpu:%d' % device):
model = get_model('policy')
# trainer init
optimizer = Config.optimizer
train_step = optimizer.minimize(model.loss)
# init session and server
sess = tf.InteractiveSession()
saver = tf.train.Saver()
if load_path is None:
sess.run(tf.initialize_all_variables())
else:
saver.restore(sess, load_path)
print("Model restored from %s" % load_path)
# accuracy
pred = tf.reshape(model.pred, [-1, 9*10*16])
label = tf.reshape(model.label, [-1, 9*10*16])
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(label,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
logging.basicConfig(filename='log.txt', level=logging.DEBUG)
# train steps
for i in range(Config.n_epoch):
# training step
batch_data, batch_label = train_data.next_batch(Config.minibatch_size)
input_dict = {model.label:batch_label}
for var, data in zip(model.inputs, batch_data):
input_dict[var]=data
#from IPython import embed;embed()
sess.run(train_step, feed_dict=input_dict)
# evalue step
if (i+1)%Config.evalue_point == 0:
batch_data, batch_label = val_data.next_batch(Config.minibatch_size)
val_dict = {model.label:batch_label}
for var, data in zip(model.inputs, batch_data):
val_dict[var]=data
score = accuracy.eval(feed_dict=val_dict)
print("epoch %d, accuracy is %.2f" % (i,score))
logging.info("epoch %d, accuracy is %.2f" % (i,score))
# save step
if (i+1)%Config.check_point == 0:
save_path = saver.save(sess, "%s/epoch-%d" %(Config.save_path, i))
print("Model saved in file: %s" % save_path)
logging.info("Model saved in file: %s" % save_path)
Example 14: train_neural_network
def train_neural_network(x):
prediction = neural_network_model(x)
#using cross entropy with logits as our cost function
#calculates the difference between prediction and y(the labels on mnist data)
cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction) )
#AdamOptimizer is one optimizer choice; alternatives include SGD, AdaGrad, and so on
#learning rate = 0.001
optimizer = tf.train.AdamOptimizer().minimize(cost)
#epoch = (cycles of) feed forward + backprop
hm_epochs = 10
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss = 0
#training the data
for _ in range(int(mnist.train.num_examples/batch_size)):
#next_batch helper func from tf
epoch_x, epoch_y = mnist.train.next_batch(batch_size)
#how this optimizes cost is unclear yet -rbdmtodo
_, c = sess.run([optimizer, cost], feed_dict = {x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:',epoch_loss)
correct = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
Example 15: fit
def fit(self):
w = tf.Variable(tf.zeros([self.x_train.shape[1], self.y_train.shape[1]]))
b = tf.Variable(tf.zeros([self.y_train.shape[1]]))
activation = tf.nn.softmax(tf.matmul(self.x, w) + b)
cost = -tf.reduce_sum(self.y * tf.log(activation))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)
self.init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(self.init)
for epoch in range(self.training_epochs):
avg_cost = 0.
if self.batch_size == -1:
self.batch_size = int(self.x_train.shape[0] / 10)
total_batch = int(self.x_train.shape[0] / self.batch_size)
for i in range(total_batch):
batch_xs = self.x_train[i * self.batch_size: (i + 1) * self.batch_size]
batch_ys = self.y_train[i * self.batch_size: (i + 1) * self.batch_size]
sess.run(optimizer, feed_dict={self.x: batch_xs, self.y: batch_ys})
avg_cost += sess.run(cost, feed_dict={self.x: batch_xs, self.y: batch_ys}) / total_batch
ZLog.info("Optimization Finished!")
self.pred = tf.argmax(activation, 1)
if self.x_test is not None:
correct_prediction = tf.equal(self.pred, tf.argmax(self.y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
ZLog.info("Accuracy:" + str(accuracy.eval({self.x: self.x_test, self.y: self.y_test})))