This article collects typical usage examples of the Python method model.inference. If you are wondering what model.inference does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also browse further usage examples of the module model to which this method belongs.
The following presents 7 code examples of model.inference, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
Example 1: loadNetwork
# Required import: import model [as alias]
# or: from model import inference [as alias]
def loadNetwork(path, sess, model_name):
    # Placeholder for a batch of RGB images of arbitrary spatial size.
    img = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 3))
    with tf.variable_scope(model_name):
        # 68 output channels for 'my_model', 17 otherwise.
        pred = inference(img, 68 if model_name == 'my_model' else 17)
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    variables_to_restore = tf.global_variables()
    # Map checkpoint variable names (stored under 'my_model/') to the
    # variables of the current scope, skipping optimizer state.
    dic = {}
    for i in variables_to_restore:
        if 'global_step' not in i.name and 'Adam' not in i.name:
            dic[str(i.op.name).replace(model_name + '/', 'my_model/')] = i
    # assign_from_checkpoint_fn comes from tf.contrib.framework (TF 1.x).
    init_fn = assign_from_checkpoint_fn(os.path.join(path, 'snapshot'), dic, ignore_missing_vars=True)
    init_fn(sess)

    def func(imgs):
        output = sess.run(pred, feed_dict={img: imgs})
        return {
            'det': output[:, :, :, :17],
            'tag': output[:, :, :, -17:]
        }
    return func
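For context, here is a minimal usage sketch of the closure returned by loadNetwork, assuming TensorFlow 1.x, a checkpoint directory ./checkpoints, and a random batch standing in for real images (these names and shapes are illustrative assumptions, not part of the original example):

import numpy as np
import tensorflow as tf

sess = tf.Session()
predict = loadNetwork('./checkpoints', sess, 'my_model')   # hypothetical checkpoint path
imgs = np.random.rand(1, 256, 256, 3).astype(np.float32)   # stand-in for a real image batch
out = predict(imgs)
print(out['det'].shape, out['tag'].shape)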
Example 2: evaluate
# Required import: import model [as alias]
# or: from model import inference [as alias]
def evaluate():
    """Eval MNIST for a number of steps."""
    with tf.Graph().as_default() as g:
        # Get images and labels for MNIST
        # (input_data is typically tensorflow.examples.tutorials.mnist.input_data).
        mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
        images = mnist.test.images
        labels = mnist.test.labels
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(images, keep_prob=1.0)
        # Calculate predictions.
        top_k_op = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
        # Create saver to restore the learned variables for eval.
        saver = tf.train.Saver()
        # eval_once is defined elsewhere in the evaluation script.
        eval_once(saver, top_k_op)
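The helper eval_once is not shown on this page. A plausible minimal sketch, assuming the checkpoint directory is exposed as a FLAGS.checkpoint_dir flag (an assumption, not part of the original), could look like this:

def eval_once(saver, top_k_op):
    # Hypothetical sketch: restore the latest checkpoint and report accuracy.
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)  # assumed flag
        if not (ckpt and ckpt.model_checkpoint_path):
            print('No checkpoint file found')
            return
        saver.restore(sess, ckpt.model_checkpoint_path)
        correct = sess.run(top_k_op)          # boolean vector over the test set
        precision = correct.sum() / float(correct.size)
        print('precision @ 1 = %.4f' % precision)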
Example 3: tower_loss
# Required import: import model [as alias]
# or: from model import inference [as alias]
def tower_loss(scope):
    """Calculate the total loss on a single tower running the MNIST model.

    Args:
        scope: unique prefix string identifying the MNIST tower, e.g. 'tower_0'

    Returns:
        Tensor of shape [] containing the total loss for a batch of data
    """
    # Get images and labels for MNIST.
    images, labels = model.inputs(FLAGS.batch_size)
    # Build inference Graph.
    logits = model.inference(images, keep_prob=0.5)
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = model.loss(logits, labels)
    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)
    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    if FLAGS.tb_logging:
        for l in losses + [total_loss]:
            # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
            # training session. This helps the clarity of presentation on
            # TensorBoard.
            loss_name = re.sub('%s_[0-9]*/' % model.TOWER_NAME, '', l.op.name)
            tf.summary.scalar(loss_name, l)
    return total_loss
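tower_loss is normally called once per GPU from the training driver. A condensed sketch of that multi-tower pattern, assuming a FLAGS.num_gpus flag and a plain gradient-descent optimizer (both assumptions, not shown on this page):

tower_grads = []
opt = tf.train.GradientDescentOptimizer(0.01)   # assumed optimizer
for gpu_id in range(FLAGS.num_gpus):            # assumed flag
    with tf.device('/gpu:%d' % gpu_id):
        with tf.name_scope('%s_%d' % (model.TOWER_NAME, gpu_id)) as scope:
            loss = tower_loss(scope)
            # Reuse variables for the next tower.
            tf.get_variable_scope().reuse_variables()
            tower_grads.append(opt.compute_gradients(loss))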
Example 4: main
# Required import: import model [as alias]
# or: from model import inference [as alias]
def main(_):
    with tf.Graph().as_default():
        config = tf.ConfigProto()
        config.gpu_options.allocator_type = 'BFC'
        sess = tf.InteractiveSession(config=config)

        x_image = tf.placeholder(tf.float32, shape=[None, 66, 200, 3], name="x_image")
        y_label = tf.placeholder(tf.float32, shape=[None, 1], name="y_label")
        keep_prob = tf.placeholder(tf.float32, name="keep_prob")

        # loss() and train() are helpers defined elsewhere in the training script.
        y_pred = model.inference(x_image, keep_prob)
        norm, losses, total_loss = loss(y_pred, y_label)
        train_op = train(total_loss)

        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter('train', sess.graph)
        saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

        if not os.path.exists(LOG_DIR):
            os.makedirs(LOG_DIR)
        checkpoint_path = os.path.join(LOG_DIR, "steering.ckpt")

        sess.run(tf.global_variables_initializer())
        udacity_data.read_data()

        for epoch in range(EPOCH):
            for i in range(STEP_PER_EPOCH):
                steps = epoch * STEP_PER_EPOCH + i
                xs, ys = udacity_data.load_train_batch(BATCH_SIZE)
                _, summary = sess.run([train_op, merged_summary_op],
                                      feed_dict={x_image: xs, y_label: ys, keep_prob: 0.7})
                if i % 10 == 0:
                    xs, ys = udacity_data.load_val_batch(BATCH_SIZE)
                    loss_value = losses.eval(feed_dict={x_image: xs, y_label: ys, keep_prob: 1.0})
                    print("Epoch: %d, Step: %d, Loss: %g" % (epoch, steps, loss_value))
                # Write logs at every iteration.
                summary_writer.add_summary(summary, steps)
                if i % 32 == 0:
                    if not os.path.exists(LOG_DIR):
                        os.makedirs(LOG_DIR)
                    saver.save(sess, checkpoint_path)
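The module-level constants and the udacity_data helper used above are defined elsewhere in the training script; plausible placeholder values (illustrative assumptions only) might be:

LOG_DIR = './logs'       # assumed log/checkpoint directory
EPOCH = 30               # assumed number of epochs
STEP_PER_EPOCH = 1000    # assumed steps per epoch
BATCH_SIZE = 128         # assumed batch size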
Example 5: evaluate_one_image
# Required import: import model [as alias]
# or: from model import inference [as alias]
def evaluate_one_image(image_array):
    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 4

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 64, 64, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # Note: the graph is built directly from image_array above, so this
        # placeholder is never actually consumed by the computation.
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])

        # You need to change the directory to yours.
        logs_train_dir = 'C:/Users/74182/Desktop/flower_world-master/save'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                result = ('The probability that this is a rose is: %.6f' % prediction[:, 0])
            elif max_index == 1:
                result = ('The probability that this is a tulip is: %.6f' % prediction[:, 1])
            elif max_index == 2:
                result = ('The probability that this is a dandelion is: %.6f' % prediction[:, 2])
            else:
                result = ('The probability that this is a sunflower is: %.6f' % prediction[:, 3])
            return result
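A minimal sketch of how this helper might be called, assuming PIL is available and a local flower image exists (the file name is an illustrative assumption):

from PIL import Image
import numpy as np

img = Image.open('rose.jpg')      # hypothetical input image
img = img.resize((64, 64))        # match the 64x64x3 shape expected above
image_array = np.array(img)
print(evaluate_one_image(image_array))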
# ------------------------------------------------------------------------
Example 6: evaluate_one_image
# Required import: import model [as alias]
# or: from model import inference [as alias]
def evaluate_one_image(image_array):
    with tf.Graph().as_default():
        BATCH_SIZE = 1
        N_CLASSES = 4

        image = tf.cast(image_array, tf.float32)
        image = tf.image.per_image_standardization(image)
        image = tf.reshape(image, [1, 64, 64, 3])
        logit = model.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # Note: the graph is built directly from image_array above, so this
        # placeholder is never actually consumed by the computation.
        x = tf.placeholder(tf.float32, shape=[64, 64, 3])

        # You need to change the directory to yours.
        logs_train_dir = 'D:/ML/flower/save/'
        saver = tf.train.Saver()

        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            if max_index == 0:
                result = ('The probability that this is a rose is: %.6f' % prediction[:, 0])
            elif max_index == 1:
                result = ('The probability that this is a tulip is: %.6f' % prediction[:, 1])
            elif max_index == 2:
                result = ('The probability that this is a dandelion is: %.6f' % prediction[:, 2])
            else:
                result = ('The probability that this is a sunflower is: %.6f' % prediction[:, 3])
            return result
# ------------------------------------------------------------------------
Example 7: train
# Required import: import model [as alias]
# or: from model import inference [as alias]
def train():
    with tf.Graph().as_default():
        # Global step counter.
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)

        dataset = DataSet()
        # Load the CSV listing the images to run prediction on.
        print("The number of images to predict is: %d" % (dataset.cnt_samples(FLAGS.predictcsv)))
        csv_predict = FLAGS.predictcsv
        lines = dataset.load_csv(csv_predict)
        lines.sort()

        images_ph = tf.placeholder(tf.float32, [1, 229, 229, 3])
        num_classes = FLAGS.num_classes
        restore_logits = not FLAGS.fine_tune

        # Build the inference graph.
        logits = model.inference(images_ph, num_classes, for_training=False, restore_logits=restore_logits)

        # Retain the batch-norm update ops collected by slim.
        batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION)

        # Saver over all variables.
        saver = tf.train.Saver(tf.all_variables())
        # Build the summary operation from the last tower summaries.
        summary_op = tf.merge_all_summaries()
        # Variable initialization.
        init = tf.initialize_all_variables()

        # Session.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Restore the latest checkpoint, if any.
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print("load: checkpoint %s" % (ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)

        print("start to predict.")
        for step, line in enumerate(lines):
            # Resize each image to 250x250 and center-crop to 229x229 to match the placeholder.
            pil_img = Image.open(line[0])
            pil_img = pil_img.resize((250, 250))
            img_array_r = np.asarray(pil_img)
            img_array_r = img_array_r[15:244, 15:244, :]
            img_array = img_array_r[None, ...]
            softmax_eval = sess.run([logits[2]], feed_dict={images_ph: img_array})
            print("%s,%s,%s" % (line[0], line[1], np.argmax(softmax_eval)))
        print("finish to predict.")

        coord.request_stop()
        coord.join(threads)
        sess.close()
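The FLAGS referenced above (predictcsv, num_classes, fine_tune, train_dir, log_device_placement) are defined elsewhere in the script. A plausible sketch of those definitions using tf.app.flags, with illustrative default values only, would be:

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('predictcsv', 'predict.csv', 'CSV listing images to predict on.')
tf.app.flags.DEFINE_integer('num_classes', 2, 'Number of output classes.')
tf.app.flags.DEFINE_boolean('fine_tune', False, 'Whether only the logits layer is being retrained.')
tf.app.flags.DEFINE_string('train_dir', './train', 'Directory holding checkpoints.')
tf.app.flags.DEFINE_boolean('log_device_placement', False, 'Whether to log device placement.')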