This page collects typical usage examples of the Python method utils.save_images. If you have been wondering what utils.save_images does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore other members of the utils module it belongs to.
Ten code examples of utils.save_images are shown below, ordered roughly by popularity.
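Note that each of the projects below ships its own utils module, and the save_images signatures differ (compare Examples 1, 3, and 10). For the grid-style calls used in Examples 4 through 9, a helper along the following lines would satisfy the call sites. This is a minimal sketch, assuming the images arrive as an [N, H, W, C] float array scaled to [-1, 1]; it is not the actual implementation from any of these repositories.

# Sketch only: a grid-style save_images(images, size, path) matching the
# call pattern in Examples 4-9, not any project's real utils module.
import numpy as np
import imageio


def save_images(images, size, image_path):
    """Tile up to size[0] * size[1] images into one grid and write it to disk."""
    rows, cols = size
    n, h, w, c = images.shape
    grid = np.zeros((rows * h, cols * w, c), dtype=np.float32)
    for idx in range(min(n, rows * cols)):
        row, col = divmod(idx, cols)
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = images[idx]
    grid = (grid + 1.0) / 2.0  # assumed [-1, 1] input, mapped back to [0, 1]
    grid = np.clip(grid * 255.0, 0, 255).astype(np.uint8)
    imageio.imwrite(image_path, grid.squeeze())
    return image_path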
Example 1: train
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def train(dataset, network, stat, sample_dir):
    initial_step = stat.get_t()
    logger.info("Training starts on epoch {}".format(initial_step))

    train_step_per_epoch = dataset.train.num_examples // conf.batch_size
    test_step_per_epoch = dataset.test.num_examples // conf.batch_size

    for epoch in range(initial_step, conf.max_epoch):
        start_time = time.time()

        # 1. train
        total_train_costs = []
        for _ in range(train_step_per_epoch):
            images = dataset.train.next_batch(conf.batch_size)
            cost = network.test(images, with_update=True)
            total_train_costs.append(cost)

        # 2. test
        total_test_costs = []
        for _ in range(test_step_per_epoch):
            images = dataset.test.next_batch(conf.batch_size)
            cost = network.test(images, with_update=False)
            total_test_costs.append(cost)

        avg_train_cost, avg_test_cost = np.mean(total_train_costs), np.mean(total_test_costs)
        stat.on_step(avg_train_cost, avg_test_cost)

        # 3. generate samples
        images, _ = dataset.test.next_batch(conf.batch_size)
        samples, occluded = generate_from_occluded(network, images)
        util.save_images(np.concatenate((occluded, samples), axis=2),
                         dataset.height, dataset.width * 2, conf.num_generated_images, 1,
                         directory=sample_dir, prefix="epoch_%s" % epoch)

        logger.info("Epoch {}: {:.2f} seconds, avg train cost: {:.3f}, avg test cost: {:.3f}"
                    .format(epoch, time.time() - start_time, avg_train_cost, avg_test_cost))
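In Example 1, the occluded inputs and their generated completions are concatenated along the width axis before saving, which is why dataset.width * 2 is passed to save_images. A quick shape check, using hypothetical NHWC batch shapes:

import numpy as np

occluded = np.zeros((16, 28, 28, 1), dtype=np.float32)  # hypothetical occluded inputs
samples = np.zeros((16, 28, 28, 1), dtype=np.float32)   # hypothetical completions
side_by_side = np.concatenate((occluded, samples), axis=2)
print(side_by_side.shape)  # (16, 28, 56, 1): height unchanged, width doubled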
Example 2: generate
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def generate(network, height, width, sample_dir):
    logger.info("Image generation starts")
    samples = network.generate()
    util.save_images(samples, height, width, 10, 10, directory=sample_dir)
Example 3: make_sample_grid_and_save
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def make_sample_grid_and_save(est, dataset_name, dataset_parent_dir, grid_dims,
                              output_dir, cur_nimg):
    """Evaluate a fixed set of validation images and save output.

    Args:
        est: tf.estimator.Estimator, TF estimator to run the predictions.
        dataset_name: basename for the validation tfrecord from which to load
            validation images.
        dataset_parent_dir: path to a directory containing the validation tfrecord.
        grid_dims: 2-tuple int for the grid size (1 unit = 1 image).
        output_dir: string, where to save image samples.
        cur_nimg: int, current number of images seen by training.

    Returns:
        None.
    """
    num_examples = grid_dims[0] * grid_dims[1]

    def input_val_fn():
        dict_inp = data.provide_data(
            dataset_name=dataset_name, parent_dir=dataset_parent_dir, subset='val',
            batch_size=1, crop_flag=True, crop_size=opts.train_resolution,
            seeds=[0], max_examples=num_examples,
            use_appearance=opts.use_appearance, shuffle=0)
        x_in = dict_inp['conditional_input']
        x_gt = dict_inp['expected_output']  # ground truth output
        x_app = dict_inp['peek_input']
        return x_in, x_gt, x_app

    def est_input_val_fn():
        x_in, _, x_app = input_val_fn()
        features = {'conditional_input': x_in, 'peek_input': x_app}
        return features

    images = [x for x in est.predict(est_input_val_fn)]
    images = np.array(images, 'f')
    images = images.reshape(grid_dims + images.shape[1:])
    utils.save_images(utils.to_png(utils.images_to_grid(images)), output_dir,
                      cur_nimg)
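The utils.images_to_grid and utils.to_png helpers in Example 3 are project-specific and not reproduced on this page. Conceptually, images_to_grid collapses the [rows, cols, H, W, C] array produced by the reshape above into a single [rows*H, cols*W, C] image, which can be done with a transpose followed by a reshape. A sketch under that assumption:

import numpy as np


def images_to_grid(images):
    """Collapse a [rows, cols, H, W, C] array into one [rows*H, cols*W, C] image.

    Sketch of the reshape trick only; the project's real utils.images_to_grid
    may also normalize or pad.
    """
    rows, cols, h, w, c = images.shape
    return images.transpose(0, 2, 1, 3, 4).reshape(rows * h, cols * w, c)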
Example 4: visualize_results
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def visualize_results(self, epoch, fix=True):
    self.G.eval()

    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

    tot_num_samples = min(self.sample_num, self.batch_size)
    image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))

    if fix:
        """ fixed noise """
        samples = self.G(self.sample_z_)
    else:
        """ random noise """
        if self.gpu_mode:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)

        samples = self.G(sample_z_)

    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)

    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Example 5: visualize_results
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def visualize_results(self, epoch):
    self.G.eval()

    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

    image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

    """ style by class """
    samples = self.G(self.sample_z_, self.sample_c_, self.sample_y_)

    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)

    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')

    """ manipulating two continuous codes """
    samples = self.G(self.sample_z2_, self.sample_c2_, self.sample_y2_)

    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)

    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_cont_epoch%03d' % epoch + '.png')
Example 6: visualize_results
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def visualize_results(self, epoch, fix=True):
    self.G.eval()

    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

    image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))

    if fix:
        """ fixed noise """
        samples = self.G(self.sample_z_, self.sample_y_)
    else:
        """ random noise """
        temp = torch.LongTensor(self.batch_size, 1).random_() % 10
        sample_y_ = torch.FloatTensor(self.batch_size, 10)
        sample_y_.zero_()
        sample_y_.scatter_(1, temp, 1)
        if self.gpu_mode:
            sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True), \
                                   Variable(sample_y_.cuda(), volatile=True)
        else:
            sample_z_, sample_y_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True), \
                                   Variable(sample_y_, volatile=True)

        samples = self.G(sample_z_, sample_y_)

    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)

    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Example 7: visualize_results
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def visualize_results(self, epoch, fix=True):
    self.G.eval()

    if not os.path.exists(self.result_dir + '/' + self.dataset + '/' + self.model_name):
        os.makedirs(self.result_dir + '/' + self.dataset + '/' + self.model_name)

    tot_num_samples = min(self.sample_num, self.batch_size)
    image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))

    if fix:
        """ fixed noise """
        samples = self.G(self.sample_z_)
    else:
        """ random noise """
        if self.gpu_mode:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)

        samples = self.G(sample_z_)

    if self.gpu_mode:
        samples = samples.cpu().data.numpy().transpose(0, 2, 3, 1)
    else:
        samples = samples.data.numpy().transpose(0, 2, 3, 1)

    utils.save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_dir + '/' + self.dataset + '/' + self.model_name + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
Example 8: visualize_results
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def visualize_results(self, epoch, fix=True):
    if not self.result_path.exists():
        self.result_path.mkdir()

    self.G.eval()

    # test_data_loader
    original_, sketch_, iv_tag_, cv_tag_ = self.test_images
    image_frame_dim = int(np.ceil(np.sqrt(len(original_))))

    # iv_tag_ to feature tensor 16 * 16 * 256 by pre-trained SketchNet
    with torch.no_grad():
        feature_tensor = self.Pretrain_ResNeXT(sketch_)

        if self.gpu_mode:
            original_, sketch_, iv_tag_, cv_tag_, feature_tensor = original_.to(self.device), sketch_.to(self.device), iv_tag_.to(self.device), cv_tag_.to(self.device), feature_tensor.to(self.device)

        G_f, G_g = self.G(sketch_, feature_tensor, cv_tag_)

        if self.gpu_mode:
            G_f = G_f.cpu()
            G_g = G_g.cpu()

    G_f = self.color_revert(G_f)
    G_g = self.color_revert(G_g)

    utils.save_images(G_f[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_path / 'tag2pix_epoch{:03d}_G_f.png'.format(epoch))
    utils.save_images(G_g[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_path / 'tag2pix_epoch{:03d}_G_g.png'.format(epoch))
Example 9: get_test_data
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def get_test_data(self, test_data_loader, count):
    test_count = 0
    original_, sketch_, iv_tag_, cv_tag_ = [], [], [], []

    for orig, sket, ivt, cvt in test_data_loader:
        original_.append(orig)
        sketch_.append(sket)
        iv_tag_.append(ivt)
        cv_tag_.append(cvt)

        test_count += len(orig)
        if test_count >= count:
            break

    original_ = torch.cat(original_, 0)
    sketch_ = torch.cat(sketch_, 0)
    iv_tag_ = torch.cat(iv_tag_, 0)
    cv_tag_ = torch.cat(cv_tag_, 0)

    self.save_tag_tensor_name(iv_tag_, cv_tag_, self.result_path / "test_image_tags.txt")

    image_frame_dim = int(np.ceil(np.sqrt(len(original_))))

    if self.gpu_mode:
        original_ = original_.cpu()
    sketch_np = sketch_.data.numpy().transpose(0, 2, 3, 1)
    original_np = self.color_revert(original_)

    utils.save_images(original_np[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_path / 'tag2pix_original.png')
    utils.save_images(sketch_np[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                      self.result_path / 'tag2pix_sketch.png')

    return original_, sketch_, iv_tag_, cv_tag_
Example 10: main
# Module to import: import utils [as alias]
# Or: from utils import save_images [as alias]
def main():
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    parser = argparse.ArgumentParser(description='Test trained models')
    parser.add_argument('--options-file', '-o', default='options-and-config.pickle', type=str,
                        help='The file where the simulation options are stored.')
    parser.add_argument('--checkpoint-file', '-c', required=True, type=str, help='Model checkpoint file')
    parser.add_argument('--batch-size', '-b', default=12, type=int, help='The batch size.')
    parser.add_argument('--source-image', '-s', required=True, type=str,
                        help='The image to watermark')
    # parser.add_argument('--times', '-t', default=10, type=int,
    #                     help='Number iterations (insert watermark->extract).')

    args = parser.parse_args()

    train_options, hidden_config, noise_config = utils.load_options(args.options_file)
    noiser = Noiser(noise_config)

    checkpoint = torch.load(args.checkpoint_file)
    hidden_net = Hidden(hidden_config, device, noiser, None)
    utils.model_from_checkpoint(hidden_net, checkpoint)

    image_pil = Image.open(args.source_image)
    image = randomCrop(np.array(image_pil), hidden_config.H, hidden_config.W)
    image_tensor = TF.to_tensor(image).to(device)
    image_tensor = image_tensor * 2 - 1  # transform from [0, 1] to [-1, 1]
    image_tensor.unsqueeze_(0)

    # for t in range(args.times):
    message = torch.Tensor(np.random.choice([0, 1], (image_tensor.shape[0],
                                                      hidden_config.message_length))).to(device)
    losses, (encoded_images, noised_images, decoded_messages) = hidden_net.validate_on_batch([image_tensor, message])
    decoded_rounded = decoded_messages.detach().cpu().numpy().round().clip(0, 1)
    message_detached = message.detach().cpu().numpy()
    print('original: {}'.format(message_detached))
    print('decoded : {}'.format(decoded_rounded))
    print('error : {:.3f}'.format(np.mean(np.abs(decoded_rounded - message_detached))))
    utils.save_images(image_tensor.cpu(), encoded_images.cpu(), 'test', '.', resize_to=(256, 256))
    # bitwise_avg_err = np.sum(np.abs(decoded_rounded - message.detach().cpu().numpy())) / (image_tensor.shape[0] * messages.shape[1])
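Example 10 calls utils.save_images with the original and encoded (watermarked) tensors, a name, a folder, and a resize_to size, so this variant writes both batches into one comparison image. The project's utils module is not reproduced here; a rough, hypothetical equivalent built on torchvision's save_image, assuming [-1, 1]-scaled NCHW tensors, could look like this sketch:

# Sketch only: not the project's actual utils.save_images.
import os
import torch
import torch.nn.functional as F
from torchvision.utils import save_image


def save_images(original_images, watermarked_images, filename, folder, resize_to=None):
    """Stack originals (first row) over watermarked images (second row) and save one PNG."""
    images = (original_images.cpu() + 1) / 2        # map assumed [-1, 1] range back to [0, 1]
    watermarked = (watermarked_images.cpu() + 1) / 2
    if resize_to is not None:
        images = F.interpolate(images, size=resize_to)
        watermarked = F.interpolate(watermarked, size=resize_to)
    stacked = torch.cat([images, watermarked], dim=0)
    save_image(stacked, os.path.join(folder, '{}.png'.format(filename)),
               nrow=original_images.shape[0])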