This article collects typical usage examples of the Python reader.Reader method. If you have been wondering what exactly reader.Reader does, how to use it, and where to find examples, the curated code samples below may help. You can also explore further usage examples from the reader module.
The following shows 6 code examples of reader.Reader, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: infer
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also relies on gzip, paddle, and project helpers load_dict, load_reverse_dict, Model, and start_infer, which are not shown here.)
def infer(img_path, model_path, image_shape, label_dict_path):
    # Load the label dictionary
    char_dict = load_dict(label_dict_path)
    # Load the reversed label dictionary (index -> label)
    reversed_char_dict = load_reverse_dict(label_dict_path)
    # Get the dictionary size
    dict_size = len(char_dict)
    # Build the reader
    my_reader = Reader(char_dict=char_dict, image_shape=image_shape)
    # Initialize PaddlePaddle
    paddle.init(use_gpu=True, trainer_count=1)
    # Load the trained parameters
    parameters = paddle.parameters.Parameters.from_tar(gzip.open(model_path))
    # Build the network model
    model = Model(dict_size, image_shape, is_infer=True)
    # Create the inferer
    inferer = paddle.inference.Inference(output_layer=model.log_probs, parameters=parameters)
    # Load the data
    test_batch = [[my_reader.load_image(img_path)]]
    # Run inference
    return start_infer(inferer, test_batch, reversed_char_dict)
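A minimal way to invoke this function might look like the sketch below; the file paths and the image shape are hypothetical placeholders, and load_dict, load_reverse_dict, Model, and start_infer are assumed to come from the surrounding project:

# Hypothetical usage sketch for infer(); all paths and the shape are placeholders
if __name__ == '__main__':
    result = infer(img_path='data/test.jpg',
                   model_path='models/params.tar.gz',
                   image_shape=(72, 27),
                   label_dict_path='data/label_dict.txt')
    print(result)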
Example 2: test
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also uses tensorflow as tf, plus FLAGS, Nivdia_Model, and batch_eval from the surrounding project.)
def test():
    x_image = tf.placeholder(tf.float32, [None, 66, 200, 3])
    y = tf.placeholder(tf.float32, [None, 1])
    keep_prob = tf.placeholder(tf.float32)
    model = Nivdia_Model(x_image, y, keep_prob, FLAGS, False)
    # Dataset reader
    dataset = reader.Reader(FLAGS.data_dir, FLAGS)
    # Model saver, used to restore the model from the model directory
    saver = tf.train.Saver()
    with tf.Session() as sess:
        path = tf.train.latest_checkpoint(FLAGS.model_dir)
        if path is not None:
            saver.restore(sess, path)
        else:
            print("There is no saved model in the model directory.")
            return  # evaluating without restored weights would fail
        loss = batch_eval(model.loss, dataset.test, x_image, y, keep_prob, 500,
                          sess)
        print("Loss (MSE) on the test dataset:", loss)
        mae = batch_eval(model.mae, dataset.test, x_image, y, keep_prob, 500,
                         sess)
        print("MAE on the test dataset:", mae)
Example 3: model
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also uses tensorflow as tf and the project's utils module.)
def model(self):
    X_reader = Reader(self.X_train_file, name='X',
                      image_size=self.image_size, batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y',
                      image_size=self.image_size, batch_size=self.batch_size)
    x = X_reader.feed()
    y = Y_reader.feed()

    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

    # X -> Y
    fake_y = self.G(x)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=self.use_lsgan)
    G_loss = G_gan_loss + cycle_loss
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan)

    # Y -> X
    fake_x = self.F(y)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan)
    F_loss = F_gan_loss + cycle_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan)

    # Summaries
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))
    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)
    tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
    tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
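In the standard CycleGAN formulation, the cycle-consistency term penalizes the L1 distance between each input and its reconstruction through both generators. A minimal sketch of what cycle_consistency_loss could look like, with hypothetical lambda1/lambda2 weight attributes:

# Sketch of a standard CycleGAN cycle-consistency loss;
# the lambda1/lambda2 attributes are assumptions about this class
def cycle_consistency_loss(self, G, F, x, y):
    forward_loss = tf.reduce_mean(tf.abs(F(G(x)) - x))   # x -> G(x) -> F(G(x)) ~ x
    backward_loss = tf.reduce_mean(tf.abs(G(F(y)) - y))  # y -> F(y) -> G(F(y)) ~ y
    return self.lambda1 * forward_loss + self.lambda2 * backward_loss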
Example 4: predict
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also uses tensorflow as tf, numpy as np, and the project's Common helper.)
def predict(self, predict_data_lines):
    if self.predict_queue is None:
        self.predict_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index,
                                           node_to_index=self.node_to_index,
                                           target_to_index=self.target_to_index,
                                           config=self.config, is_evaluating=True)
        self.predict_placeholder = tf.placeholder(tf.string)
        reader_output = self.predict_queue.process_from_placeholder(self.predict_placeholder)
        reader_output = {key: tf.expand_dims(tensor, 0) for key, tensor in reader_output.items()}
        self.predict_top_indices_op, self.predict_top_scores_op, _, self.attention_weights_op = \
            self.build_test_graph(reader_output)
        self.predict_source_string = reader_output[reader.PATH_SOURCE_STRINGS_KEY]
        self.predict_path_string = reader_output[reader.PATH_STRINGS_KEY]
        self.predict_path_target_string = reader_output[reader.PATH_TARGET_STRINGS_KEY]
        self.predict_target_strings_op = reader_output[reader.TARGET_STRING_KEY]

        self.initialize_session_variables(self.sess)
        self.saver = tf.train.Saver()
        self.load_model(self.sess)

    results = []
    for line in predict_data_lines:
        predicted_indices, top_scores, true_target_strings, attention_weights, \
            path_source_string, path_strings, path_target_string = self.sess.run(
                [self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op,
                 self.attention_weights_op,
                 self.predict_source_string, self.predict_path_string, self.predict_path_target_string],
                feed_dict={self.predict_placeholder: line})
        top_scores = np.squeeze(top_scores, axis=0)
        path_source_string = path_source_string.reshape((-1))
        path_strings = path_strings.reshape((-1))
        path_target_string = path_target_string.reshape((-1))
        predicted_indices = np.squeeze(predicted_indices, axis=0)
        true_target_strings = Common.binary_to_string(true_target_strings[0])

        if self.config.BEAM_WIDTH > 0:
            predicted_strings = [[self.index_to_target[sugg] for sugg in timestep]
                                 for timestep in predicted_indices]  # (target_length, top-k)
            predicted_strings = list(map(list, zip(*predicted_strings)))  # (top-k, target_length)
            top_scores = [np.exp(np.sum(s)) for s in zip(*top_scores)]
        else:
            predicted_strings = [self.index_to_target[idx]
                                 for idx in predicted_indices]  # (batch, target_length)

        attention_per_path = None
        if self.config.BEAM_WIDTH == 0:
            attention_per_path = self.get_attention_per_path(path_source_string, path_strings,
                                                             path_target_string, attention_weights)
        results.append((true_target_strings, predicted_strings, top_scores, attention_per_path))
    return results
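Calling the method might look like the following; the input format (one preprocessed path-context example per line) is an assumption based on the placeholder and reader usage above:

# Hypothetical usage sketch; `model` is an instance of the class defining predict()
lines = ['<one preprocessed example per line>']
for true_target, predictions, scores, attention in model.predict(lines):
    print('true:', true_target)
    print('predicted:', predictions, 'scores:', scores)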
Example 5: main
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also uses tensorflow as tf, cv2, os, and the project's visualize helper.)
def main():
    x_image = tf.placeholder(tf.float32, [None, 66, 200, 3])
    keep_prob = tf.placeholder(tf.float32)
    y = tf.placeholder(tf.float32, [None, 1])
    model = Nivdia_Model(x_image, y, keep_prob, FLAGS, False)
    # Dataset reader
    dataset = reader.Reader(FLAGS.data_dir, FLAGS)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        # Restore the model
        print(FLAGS.model_dir)
        path = tf.train.latest_checkpoint(FLAGS.model_dir)
        if path is None:
            print("Err: the model does NOT exist")
            exit(0)
        else:
            saver.restore(sess, path)
            print("Restored model from", path)
        batch_x, batch_y = dataset.train.next_batch(FLAGS.visualization_num,
                                                    False)
        y_pred = sess.run(
            model.prediction, feed_dict={
                x_image: batch_x,
                keep_prob: 1.0
            })
        masks = sess.run(
            model.visualization_mask,
            feed_dict={
                x_image: batch_x,
                keep_prob: 1.0
            })
        if not os.path.exists(FLAGS.result_dir):
            os.makedirs(FLAGS.result_dir)
        for i in range(FLAGS.visualization_num):
            image, mask, overlay = visualize(batch_x[i], masks[i])
            cv2.imwrite(
                os.path.join(FLAGS.result_dir, "image_" + str(i) + ".jpg"), image)
            cv2.imwrite(
                os.path.join(FLAGS.result_dir, "mask_" + str(i) + ".jpg"), mask)
            cv2.imwrite(
                os.path.join(FLAGS.result_dir, "overlay_" + str(i) + ".jpg"),
                overlay)
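The visualize helper is not part of this excerpt. One plausible sketch, assuming cv2 and numpy as np are imported, the mask is a non-negative single-channel saliency map, and the image is a 66x200x3 array, scales both to uint8 and blends them with OpenCV:

# Hypothetical sketch of the visualize() helper used above;
# the project's real implementation may differ
def visualize(image, mask):
    image = np.uint8(np.clip(image, 0, 255))
    mask = np.squeeze(mask)
    heatmap = cv2.applyColorMap(np.uint8(255 * mask / (mask.max() + 1e-8)),
                                cv2.COLORMAP_JET)
    overlay = cv2.addWeighted(image, 0.6, heatmap, 0.4, 0)
    return image, heatmap, overlay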
Example 6: model
# Required import: import reader [as alias]
# Or: from reader import Reader [as alias]
# (This excerpt also uses tensorflow as tf and the project's utils module.)
def model(self):
    X_reader = Reader(self.X_train_file, name='X',
                      image_size1=self.image_size1, image_size2=self.image_size2,
                      batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y',
                      image_size1=self.image_size1, image_size2=self.image_size2,
                      batch_size=self.batch_size)
    x = X_reader.feed()
    y = Y_reader.feed()

    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
    perceptual_loss = self.perceptual_similarity_loss(self.G, self.F, x, y, self.vgg)

    # X -> Y
    fake_y = self.G(x)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=self.use_lsgan)
    G_loss = G_gan_loss + cycle_loss + perceptual_loss  # + pixel_loss
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan)

    # Y -> X
    fake_x = self.F(y)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan)
    F_loss = F_gan_loss + cycle_loss + perceptual_loss  # + pixel_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan)

    # Summaries
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))
    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)
    tf.summary.scalar('loss/perceptual_loss', perceptual_loss)
    # tf.summary.scalar('loss/pixel_loss', pixel_loss)
    tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
    tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
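The perceptual term added in this variant is typically an L2 distance between VGG feature activations of the cycle reconstructions and the originals. A minimal sketch of perceptual_similarity_loss under that assumption, treating vgg as a callable that maps images to a feature tensor:

# Sketch of a perceptual (VGG-feature) similarity loss; the exact feature
# layer and weighting used by the project are assumptions
def perceptual_similarity_loss(self, G, F, x, y, vgg):
    loss_x = tf.reduce_mean(tf.square(vgg(F(G(x))) - vgg(x)))
    loss_y = tf.reduce_mean(tf.square(vgg(G(F(y))) - vgg(y)))
    return loss_x + loss_y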