

Python SummaryWriter.add_image Method Code Examples

This article collects typical usage examples of the Python method tensorboardX.SummaryWriter.add_image. If you are wondering what SummaryWriter.add_image does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the containing class, tensorboardX.SummaryWriter.


The following sections show 12 code examples of the SummaryWriter.add_image method, sorted by popularity by default.
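
Before diving into the examples, the snippet below is a minimal, self-contained sketch (not taken from any of the projects listed here) of the call pattern they all build on; the tag names and log directory are arbitrary placeholders, and it assumes torch, torchvision, and tensorboardX are installed.

import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter

writer = SummaryWriter('runs/add_image_demo')  # arbitrary log directory

# add_image expects a single image tensor in CHW layout with values in [0, 1]
single_image = torch.rand(3, 64, 64)
writer.add_image('demo/single_image', single_image, global_step=0)

# For a batch of images, tile them into one image first with make_grid
batch = torch.rand(16, 3, 64, 64)
writer.add_image('demo/image_grid', make_grid(batch, nrow=4, normalize=True), global_step=0)

writer.close()

Running `tensorboard --logdir runs` then shows both entries under the Images tab; every example below is a variation of this pattern, usually combined with make_grid to log batches.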

Example 1: __init__

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
import os
import time
# `util` and the `opt` object below are project-local helpers from the GANimation repo.
class TBVisualizer:
    def __init__(self, opt):
        self._opt = opt
        self._save_path = os.path.join(opt.checkpoints_dir, opt.name)

        self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
        self._tb_path = os.path.join(self._save_path, 'summary.json')
        self._writer = SummaryWriter(self._save_path)

        with open(self._log_path, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    def __del__(self):
        self._writer.close()

    def display_current_results(self, visuals, it, is_train, save_visuals=False):
        for label, image_numpy in visuals.items():
            sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
            self._writer.add_image(sum_name, image_numpy, it)

            if save_visuals:
                util.save_image(image_numpy,
                                os.path.join(self._opt.checkpoints_dir, self._opt.name,
                                             'event_imgs', sum_name, '%08d.png' % it))

        self._writer.export_scalars_to_json(self._tb_path)

    def plot_scalars(self, scalars, it, is_train):
        for label, scalar in scalars.items():
            sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
            self._writer.add_scalar(sum_name, scalar, it)

    def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        visuals_info = "v" if visuals_were_stored else ""
        message = '%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs) ' % (log_time, visuals_info, epoch, i, iters_per_epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)

        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def print_current_validate_errors(self, epoch, errors, t):
        log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
        message = '%s (V, epoch: %d, time_to_val: %ds) ' % (log_time, epoch, t)
        for k, v in errors.items():
            message += '%s:%.3f ' % (k, v)

        print(message)
        with open(self._log_path, "a") as log_file:
            log_file.write('%s\n' % message)

    def save_images(self, visuals):
        for label, image_numpy in visuals.items():
            image_name = '%s.png' % label
            save_path = os.path.join(self._save_path, "samples", image_name)
            util.save_image(image_numpy, save_path)
Developer: iGuaZi, Project: GANimation, Lines: 61, Source: tb_visualizer.py

Example 2: train

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
    def train(self, epoch_to_restore=0):
        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)
        fixed_dataloader = DataLoader(dataset, 16)
        fixed_batch = next(iter(fixed_dataloader))

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)

        try:
            epoch = epoch_to_restore
            while True:
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                        g.zero_grad()
                        x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                        g_z = g.forward(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    writer.add_scalar('train_loss', loss, epoch)

                z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                g.eval()
                g_z = g.forward(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
Developer: chouqin3, Project: generative-scattering-networks, Lines: 54, Source: GSN.py

Example 3: get_output_folder

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
    args = parser.parse_args()

    if args.resume is None:
        args.output = get_output_folder(args.output, args.env)
    else:
        args.output = args.resume

    bullet = ("Bullet" in args.env)
    if bullet:
        import pybullet
        import pybullet_envs

    if args.env == "Paint":
        from env import CanvasEnv
        env = CanvasEnv()
        writer.add_image('circle.png', env.target)
    elif args.env == "KukaGym":
        env = KukaGymEnv(renders=False, isDiscrete=True)
    elif args.env == "LTR":
        from osim.env import RunEnv
        env = RunEnv(visualize=False)
    elif args.discrete:        
        env = gym.make(args.env)
        env = env.unwrapped
    else:
        env = NormalizedEnv(gym.make(args.env))

    # input random seed
    if args.seed > 0:
        np.random.seed(args.seed)
        env.seed(args.seed)
Developer: megvii-rl, Project: pytorch-gym, Lines: 33, Source: main.py

Example 4: SummaryWorker

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
class SummaryWorker(multiprocessing.Process):
    def __init__(self, env):
        super(SummaryWorker, self).__init__()
        self.env = env
        self.config = env.config
        self.queue = multiprocessing.Queue()
        try:
            self.timer_scalar = utils.train.Timer(env.config.getfloat('summary', 'scalar'))
        except configparser.NoOptionError:
            self.timer_scalar = lambda: False
        try:
            self.timer_image = utils.train.Timer(env.config.getfloat('summary', 'image'))
        except configparser.NoOptionError:
            self.timer_image = lambda: False
        try:
            self.timer_histogram = utils.train.Timer(env.config.getfloat('summary', 'histogram'))
        except configparser.NoOptionError:
            self.timer_histogram = lambda: False
        with open(os.path.expanduser(os.path.expandvars(env.config.get('summary_histogram', 'parameters'))), 'r') as f:
            self.histogram_parameters = utils.RegexList([line.rstrip() for line in f])
        self.draw_bbox = utils.visualize.DrawBBox(env.config, env.category)
        self.draw_iou = utils.visualize.DrawIou(env.config)

    def __call__(self, name, **kwargs):
        if getattr(self, 'timer_' + name)():
            kwargs = getattr(self, 'copy_' + name)(**kwargs)
            self.queue.put((name, kwargs))

    def stop(self):
        self.queue.put((None, {}))

    def run(self):
        self.writer = SummaryWriter(os.path.join(self.env.model_dir, self.env.args.run))
        while True:
            name, kwargs = self.queue.get()
            if name is None:
                break
            func = getattr(self, 'summary_' + name)
            try:
                func(**kwargs)
            except:
                traceback.print_exc()

    def copy_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        loss_total = loss_total.data.clone().cpu().numpy()
        loss = {key: loss[key].data.clone().cpu().numpy() for key in loss}
        loss_hparam = {key: loss_hparam[key].data.clone().cpu().numpy() for key in loss_hparam}
        return dict(
            step=step,
            loss_total=loss_total,
            loss=loss, loss_hparam=loss_hparam,
        )

    def summary_scalar(self, **kwargs):
        step, loss_total, loss, loss_hparam = (kwargs[key] for key in 'step, loss_total, loss, loss_hparam'.split(', '))
        for key in loss:
            self.writer.add_scalar('loss/' + key, loss[key][0], step)
        if self.config.getboolean('summary_scalar', 'loss_hparam'):
            self.writer.add_scalars('loss_hparam', {key: loss_hparam[key][0] for key in loss_hparam}, step)
        self.writer.add_scalar('loss_total', loss_total[0], step)

    def copy_image(self, **kwargs):
        step, height, width, rows, cols, data, pred, debug = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, debug'.split(', '))
        data = {key: data[key].clone().cpu().numpy() for key in 'image, yx_min, yx_max, cls'.split(', ')}
        pred = {key: pred[key].data.clone().cpu().numpy() for key in 'yx_min, yx_max, iou, logits'.split(', ') if key in pred}
        matching = (debug['positive'].float() - debug['negative'].float() + 1) / 2
        matching = matching.data.clone().cpu().numpy()
        return dict(
            step=step, height=height, width=width, rows=rows, cols=cols,
            data=data, pred=pred,
            matching=matching,
        )

    def summary_image(self, **kwargs):
        step, height, width, rows, cols, data, pred, matching = (kwargs[key] for key in 'step, height, width, rows, cols, data, pred, matching'.split(', '))
        image = data['image']
        limit = min(self.config.getint('summary_image', 'limit'), image.shape[0])
        image = image[:limit, :, :, :]
        yx_min, yx_max, iou = (pred[key] for key in 'yx_min, yx_max, iou'.split(', '))
        scale = [height / rows, width / cols]
        yx_min, yx_max = (a * scale for a in (yx_min, yx_max))
        if 'logits' in pred:
            cls = np.argmax(F.softmax(torch.autograd.Variable(torch.from_numpy(pred['logits'])), -1).data.cpu().numpy(), -1)
        else:
            cls = np.zeros(iou.shape, np.int)
        if self.config.getboolean('summary_image', 'bbox'):
            # data
            canvas = np.copy(image)
            canvas = pybenchmark.profile('bbox/data')(self.draw_bbox_data)(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')))
            self.writer.add_image('bbox/data', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
            # pred
            canvas = np.copy(image)
            canvas = pybenchmark.profile('bbox/pred')(self.draw_bbox_pred)(canvas, yx_min, yx_max, cls, iou, nms=True)
            self.writer.add_image('bbox/pred', torchvision.utils.make_grid(torch.from_numpy(np.stack(canvas)).permute(0, 3, 1, 2).float(), normalize=True, scale_each=True), step)
        if self.config.getboolean('summary_image', 'iou'):
            # bbox
            canvas = np.copy(image)
            canvas_data = self.draw_bbox_data(canvas, *(data[key] for key in 'yx_min, yx_max, cls'.split(', ')), colors=['g'])
            # data
#......... part of the code is omitted here .........
Developer: codealphago, Project: yolo2-pytorch, Lines: 103, Source: train.py

Example 5: netg

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
            noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
            fake_img = netg(noises)
            fake_output = netd(fake_img)
            error_g = criterion(fake_output, true_labels)

            print('error_g:', error_g.data[0])
            writer.add_scalar('data/error_g', error_g.data[0], ii)

            error_g.backward()
            optimizer_g.step()

        if (ii + 1) % opt.plot_every == 0:
            fix_fake_imgs = netg(fix_noises)

            fake = fix_fake_imgs[:64] * 0.5 + 0.5
            real = real_img[:64] * 0.5 + 0.5

            writer.add_image('image/fake_Image', fake, ii)
            writer.add_image('image/real_Image', real, ii)

            print('epoch[{}:{}],ii[{}:{}]'.format(epoch, opt.max_epoch, ii, len(dataloader)))

        if (epoch + 1) % opt.decay_every == 0:
            utils.save_image(fix_fake_imgs.data[:64], '%s/%s.png' % (opt.save_path, epoch), normalize=True,
                             range=(-1, 1))
            torch.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            torch.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            optimizer_g = torch.optim.Adam(netg.parameters(), opt.lr1, betas=(opt.beta1, 0.999))
            optimizer_d = torch.optim.Adam(netd.parameters(), opt.lr2, betas=(opt.beta1, 0.999))
Developer: HadXu, Project: machine-learning, Lines: 31, Source: main.py

Example 6: main

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
def main():
    writer = SummaryWriter(args.snapshot_dir)
    
    if not args.gpu == 'None':
        os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    h, w = map(int, args.input_size.split(','))
    input_size = (h, w)

    cudnn.enabled = True

    xlsor = XLSor(num_classes=args.num_classes)
    print(xlsor)

    saved_state_dict = torch.load(args.restore_from)
    new_params = xlsor.state_dict().copy()
    for i in saved_state_dict:
        i_parts = i.split('.')
        if not i_parts[0]=='fc':
            new_params['.'.join(i_parts[0:])] = saved_state_dict[i] 
    
    xlsor.load_state_dict(new_params)


    model = DataParallelModel(xlsor)
    model.train()
    model.float()
    model.cuda()    

    criterion = Criterion()
    criterion = DataParallelCriterion(criterion)
    criterion.cuda()
    
    cudnn.benchmark = True

    if not os.path.exists(args.snapshot_dir):
        os.makedirs(args.snapshot_dir)


    trainloader = data.DataLoader(XRAYDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size,
                    scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), 
                    batch_size=args.batch_size, shuffle=True, num_workers=16, pin_memory=True)

    optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, xlsor.parameters()), 'lr': args.learning_rate }],
                lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True)


    for i_iter, batch in enumerate(trainloader):
        i_iter += args.start_iters
        images, labels, _, _ = batch
        images = images.cuda()
        labels = labels.float().cuda()
        if torch_ver == "0.3":
            images = Variable(images)
            labels = Variable(labels)

        optimizer.zero_grad()
        lr = adjust_learning_rate(optimizer, i_iter)
        preds = model(images, args.recurrence)

        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if i_iter % 100 == 0:
            writer.add_scalar('learning_rate', lr, i_iter)
            writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)

        if i_iter % 100 == 0:
            images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
            if isinstance(preds, list):
                preds = preds[0]
            if isinstance(preds, list):
                preds = preds[0]
            preds = interp(preds)
            for index, img in enumerate(images_inv):
                writer.add_image('Images/'+str(index), torch.from_numpy(img/255.).permute(2,0,1), i_iter)
                writer.add_image('Labels/'+str(index), labels[index], i_iter)
                writer.add_image('preds/'+str(index), (preds[index]>0.5).float(), i_iter)

        print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy()))

        if i_iter >= args.num_steps-1:
            print('save model ...')
            torch.save(xlsor.state_dict(),osp.join(args.snapshot_dir, 'XLSor_'+str(args.num_steps)+'.pth'))
            break

        if i_iter % args.save_pred_every == 0:
            print('taking snapshot ...')
            torch.save(xlsor.state_dict(),osp.join(args.snapshot_dir, 'XLSor_'+str(i_iter)+'.pth'))

    end = timeit.default_timer()
    print(end-start,'seconds')
Developer: rsummers11, Project: CADLab, Lines: 97, Source: train.py

Example 7: __init__

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
class Train:
    __device = []
    __writer = []
    __model = []
    __transformations = []
    __dataset_train = []
    __train_loader = []
    __loss_func = []
    __optimizer = []
    __exp_lr_scheduler = []

    def __init__(self, gpu='0'):
        # Device configuration
        self.__device = torch.device('cuda:'+gpu if torch.cuda.is_available() else 'cpu')
        self.__writer = SummaryWriter('logs')
        self.__model = CNNDriver()
        # Set model to train mode
        self.__model.train()
        print(self.__model)
        self.__writer.add_graph(self.__model, torch.rand(10, 3, 66, 200))
        # Put model on GPU
        self.__model = self.__model.to(self.__device)

    def train(self, num_epochs=100, batch_size=400, lr=0.0001, l2_norm=0.001, save_dir='./save', input='./DataLMDB'):
        # Create log/save directory if it does not exist
        if not os.path.exists('./logs'):
            os.makedirs('./logs')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        self.__transformations = transforms.Compose([AugmentDrivingTransform(), 
                                                     RandomBrightness(), ConvertToGray(), 
                                                     ConvertToSepia(), AddNoise(), DrivingDataToTensor(),])
        self.__dataset_train = DriveData_LMDB(input, self.__transformations)
        self.__train_loader = DataLoader(self.__dataset_train, batch_size=batch_size, shuffle=True, num_workers=4)

        # Loss and Optimizer
        self.__loss_func = nn.MSELoss()
        # self.__loss_func = nn.SmoothL1Loss()
        self.__optimizer = torch.optim.Adam(self.__model.parameters(), lr=lr, weight_decay=l2_norm)

        # Decay LR by a factor of 0.1 every 10 epochs
        self.__exp_lr_scheduler = lr_scheduler.StepLR(self.__optimizer, step_size=15, gamma=0.1)

        print('Train size:', len(self.__dataset_train), 'Batch size:', batch_size)
        print('Batches per epoch:', len(self.__dataset_train) // batch_size)

        # Train the Model
        iteration_count = 0
        for epoch in range(num_epochs):
            for batch_idx, samples in enumerate(self.__train_loader):

                # Send inputs/labels to GPU
                images = samples['image'].to(self.__device)
                labels = samples['label'].to(self.__device)

                self.__optimizer.zero_grad()

                # Forward + Backward + Optimize
                outputs = self.__model(images)
                loss = self.__loss_func(outputs, labels.unsqueeze(dim=1))

                loss.backward()
                self.__optimizer.step()
                self.__exp_lr_scheduler.step(epoch)

                # Send loss to tensorboard
                self.__writer.add_scalar('loss/', loss.item(), iteration_count)
                self.__writer.add_histogram('steering_out', outputs.clone().detach().cpu().numpy(), iteration_count, bins='doane')
                self.__writer.add_histogram('steering_in', 
                                            labels.unsqueeze(dim=1).clone().detach().cpu().numpy(), iteration_count, bins='doane')

                # Get current learning rate (To display on Tensorboard)
                for param_group in self.__optimizer.param_groups:
                    curr_learning_rate = param_group['lr']
                    self.__writer.add_scalar('learning_rate/', curr_learning_rate, iteration_count)

                # Display on each epoch
                if batch_idx == 0:
                    # Send image to tensorboard
                    self.__writer.add_image('Image', images, epoch)
                    self.__writer.add_text('Steering', 'Steering:' + str(outputs[batch_idx].item()), epoch)
                    # Print Epoch and loss
                    print('Epoch [%d/%d] Loss: %.4f' % (epoch + 1, num_epochs, loss.item()))
                    # Save the Trained Model parameters
                    torch.save(self.__model.state_dict(), save_dir+'/cnn_' + str(epoch) + '.pkl')

                iteration_count += 1
Developer: leonardoaraujosantos, Project: DriverLessCarHackathon, Lines: 90, Source: train.py

Example 8: STGANAgent

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]

#......... part of the code is omitted here .........
            self.optimizer_D.zero_grad()
            d_loss.backward(retain_graph=True)
            self.optimizer_D.step()

            # summarize
            scalars = {}
            scalars['D/loss'] = d_loss.item()
            scalars['D/loss_adv'] = d_loss_adv.item()
            scalars['D/loss_cls'] = d_loss_cls.item()
            scalars['D/loss_real'] = d_loss_real.item()
            scalars['D/loss_fake'] = d_loss_fake.item()
            scalars['D/loss_gp'] = d_loss_gp.item()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #

            if (i + 1) % self.config.n_critic == 0:
                # original-to-target domain
                x_fake = self.G(x_real, attr_diff)
                out_src, out_cls = self.D(x_fake)
                g_loss_adv = - torch.mean(out_src)
                g_loss_cls = self.classification_loss(out_cls, label_trg)

                # target-to-original domain
                x_reconst = self.G(x_fake, c_org - c_org)
                g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))

                # backward and optimize
                g_loss = g_loss_adv + self.config.lambda3 * g_loss_rec + self.config.lambda2 * g_loss_cls
                self.optimizer_G.zero_grad()
                g_loss.backward()
                self.optimizer_G.step()

                # summarize
                scalars['G/loss'] = g_loss.item()
                scalars['G/loss_adv'] = g_loss_adv.item()
                scalars['G/loss_cls'] = g_loss_cls.item()
                scalars['G/loss_rec'] = g_loss_rec.item()

            self.current_iteration += 1

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #

            if self.current_iteration % self.config.summary_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                print('Elapsed [{}], Iteration [{}/{}]'.format(et, self.current_iteration, self.config.max_iters))
                for tag, value in scalars.items():
                    self.writer.add_scalar(tag, value, self.current_iteration)

            if self.current_iteration % self.config.sample_step == 0:
                self.G.eval()
                with torch.no_grad():
                    x_sample = x_sample.to(self.device)
                    x_fake_list = [x_sample]
                    for c_trg_sample in c_sample_list:
                        attr_diff = c_trg_sample.to(self.device) - c_org_sample.to(self.device)
                        attr_diff = attr_diff * self.config.thres_int
                        x_fake_list.append(self.G(x_sample, attr_diff.to(self.device)))
                    x_concat = torch.cat(x_fake_list, dim=3)
                    self.writer.add_image('sample', make_grid(self.denorm(x_concat.data.cpu()), nrow=1),
                                          self.current_iteration)
                    save_image(self.denorm(x_concat.data.cpu()),
                               os.path.join(self.config.sample_dir, 'sample_{}.jpg'.format(self.current_iteration)),
                               nrow=1, padding=0)

            if self.current_iteration % self.config.checkpoint_step == 0:
                self.save_checkpoint()

            self.lr_scheduler_G.step()
            self.lr_scheduler_D.step()

    def test(self):
        self.load_checkpoint()
        self.G.to(self.device)

        tqdm_loader = tqdm(self.data_loader.test_loader, total=self.data_loader.test_iterations,
                          desc='Testing at checkpoint {}'.format(self.config.checkpoint))

        self.G.eval()
        with torch.no_grad():
            for i, (x_real, c_org) in enumerate(tqdm_loader):
                x_real = x_real.to(self.device)
                c_trg_list = self.create_labels(c_org, self.config.attrs)

                x_fake_list = [x_real]
                for c_trg in c_trg_list:
                    attr_diff = c_trg - c_org
                    x_fake_list.append(self.G(x_real, attr_diff.to(self.device)))
                x_concat = torch.cat(x_fake_list, dim=3)
                result_path = os.path.join(self.config.result_dir, 'sample_{}.jpg'.format(i + 1))
                save_image(self.denorm(x_concat.data.cpu()), result_path, nrow=1, padding=0)

    def finalize(self):
        print('Please wait while finalizing the operation.. Thank you')
        self.writer.export_scalars_to_json(os.path.join(self.config.summary_dir, 'all_scalars.json'))
        self.writer.close()
Developer: bluestyle97, Project: STGAN-pytorch, Lines: 104, Source: stgan.py

Example 9: TensorBoardImages

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
class TensorBoardImages(Callback):
    """The TensorBoardImages callback will write a selection of images from the validation pass to tensorboard using the
    TensorboardX library and torchvision.utils.make_grid
    """

    def __init__(self, log_dir='./logs',
                 comment='torchbearer',
                 name='Image',
                 key=torchbearer.Y_PRED,
                 write_each_epoch=True,
                 num_images=16,
                 nrow=8,
                 padding=2,
                 normalize=False,
                 range=None,
                 scale_each=False,
                 pad_value=0):
        """Create TensorBoardImages callback which writes images from the given key to the given path. Full name of
        image sub directory will be model name + _ + comment.

        :param log_dir: The tensorboard log path for output
        :type log_dir: str
        :param comment: Descriptive comment to append to path
        :type comment: str
        :param name: The name of the image
        :type name: str
        :param key: The key in state containing image data (tensor of size [c, w, h] or [b, c, w, h])
        :type key: str
        :param write_each_epoch: If True, write data on every epoch, else write only for the first epoch.
        :type write_each_epoch: bool
        :param num_images: The number of images to write
        :type num_images: int
        :param nrow: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        :param padding: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        :param normalize: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        :param range: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        :param scale_each: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        :param pad_value: See `torchvision.utils.make_grid https://pytorch.org/docs/stable/torchvision/utils.html#torchvision.utils.make_grid`
        """
        self.log_dir = log_dir
        self.comment = comment
        self.name = name
        self.key = key
        self.write_each_epoch = write_each_epoch
        self.num_images = num_images
        self.nrow = nrow
        self.padding = padding
        self.normalize = normalize
        self.range = range
        self.scale_each = scale_each
        self.pad_value = pad_value

        self._writer = None
        self._data = None
        self.done = False

    def on_start(self, state):
        log_dir = os.path.join(self.log_dir, state[torchbearer.MODEL].__class__.__name__ + '_' + self.comment)
        self._writer = SummaryWriter(log_dir=log_dir)

    def on_step_validation(self, state):
        if not self.done:
            data = state[self.key].clone()

            if len(data.size()) == 3:
                data = data.unsqueeze(1)

            if self._data is None:
                remaining = self.num_images if self.num_images < data.size(0) else data.size(0)

                self._data = data[:remaining].to('cpu')
            else:
                remaining = self.num_images - self._data.size(0)

                if remaining > data.size(0):
                    remaining = data.size(0)

                self._data = torch.cat((self._data, data[:remaining].to('cpu')), dim=0)

            if self._data.size(0) >= self.num_images:
                image = utils.make_grid(
                    self._data,
                    nrow=self.nrow,
                    padding=self.padding,
                    normalize=self.normalize,
                    range=self.range,
                    scale_each=self.scale_each,
                    pad_value=self.pad_value
                )
                self._writer.add_image(self.name, image, state[torchbearer.EPOCH])
                self.done = True
                self._data = None

    def on_end_epoch(self, state):
        if self.write_each_epoch:
            self.done = False

    def on_end(self, state):
        self._writer.close()
Developer: little1tow, Project: torchbearer, Lines: 101, Source: tensor_board.py

Example 10: net_gener

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
        batch_v = batch_v.to(device)
        gen_output_v = net_gener(gen_input_v)

        # train discriminator
        dis_optimizer.zero_grad()
        dis_output_true_v = net_discr(batch_v)
        dis_output_fake_v = net_discr(gen_output_v.detach())
        dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
        dis_loss.backward()
        dis_optimizer.step()
        dis_losses.append(dis_loss.item())

        # train generator
        gen_optimizer.zero_grad()
        dis_output_v = net_discr(gen_output_v)
        gen_loss_v = objective(dis_output_v, true_labels_v)
        gen_loss_v.backward()
        gen_optimizer.step()
        gen_losses.append(gen_loss_v.item())

        iter_no += 1
        if iter_no % REPORT_EVERY_ITER == 0:
            log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
            writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
            writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
            gen_losses = []
            dis_losses = []
        if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
            writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64]), iter_no)
            writer.add_image("real", vutils.make_grid(batch_v.data[:64]), iter_no)
Developer: dhaopku, Project: Deep-Reinforcement-Learning-Hands-On, Lines: 32, Source: 03_atari_gan.py

Example 11: SummaryMaker

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]

#......... part of the code is omitted here .........
        has a batch dimension it takes the FIRST ELEMENT of the batch. The image
        is displayed as fusion of the input image in grayscale and the overlay
        in the chosen color_map, this fusion is controlled by the alpha factor.
        In the case of the embeddings, since there are multiple feature
        channels, we show each of them individually in a grid.
        OBS: The colors represent relative values, where the peak color corresponds
        to the maximum value in any given channel, so no direct value comparisons
        can be made between epochs, only the relative distribution of neighboring
        pixel values, (which should be enough, since we are mostly interested
        in finding the maximum of a given correlation map)

        Args:
            tag: (str) The string identifying the image in tensorboard, images
                with the same tag are grouped together with a slider, and are
                indexed by epoch.
            embed: (torch.Tensor) The tensor containing the embedding of an
                input (ref or search image) or a correlation map (the final
                output). The shape should be [B, C, H, W] or [B, H, W] for the
                case of the correlation map.
            img: (torch.Tensor) The image on top of which the embed is going
                to be overlaid. Reference image embeddings should be overlaid
                on top of reference images and search image embeddings as well
                as the correlation maps should be overlaid on top of the search
                images.
            alpha: (float) A mixing variable, it controls how much of the final
                embedding corresponds to the grayscale input image and how much
                corresponds to the overlay. Alpha = 0, means there is no
                overlay in the final image, only the input image. Conversely,
                Alpha = 1 means there is only overlay. Adjust this value so
                you can distinctly see the overlay details while still seeing
                where it is in relation to the original image. Adjust this value so
            cmap: (str) The name of the colormap to be used with the overlay.
                The colormaps are defined in the colormaps.py module, but values
                include 'viridis' (greenish blue) and 'inferno' (yellowish red).
            add_ref: (torch.Tensor) Optional. An additional reference image that
                will be plotted to the side of the other images. Useful when
                plotting correlation maps, because it lets the user see both
                the search image and the reference that is used as the target.

        ``Example``
            >>> summ_maker = SummaryMaker(os.path.join(exp_dir, 'tensorboard'), params,
                                           model.upscale_factor)
            ...
            >>> embed_ref = model.get_embedding(ref_img_batch)
            >>> embed_srch = model.get_embedding(search_batch)
            >>> output_batch = model.match_corr(embed_ref, embed_srch)
            >>> batch_index = 0
            >>> summ_maker.add_overlay("Ref_image_{}".format(tbx_index), embed_ref[batch_index], ref_img_batch[batch_index], cmap='inferno')
            >>> summ_maker.add_overlay("Search_image_{}".format(tbx_index), embed_srch[batch_index], search_batch[batch_index], cmap='inferno')
            >>> summ_maker.add_overlay("Correlation_map_{}".format(tbx_index), output_batch[batch_index], search_batch[batch_index], cmap='inferno')
        """
        # TODO Add numbers in the final image to the feature channels.
        # TODO Add the color bar showing the progression of values.
        # If minibatch is given, take only the first image
        # TODO let the user select the image? Loop on all images?
        if len(embed.shape) == 4:
            embed = embed[0]
        if len(img.shape) == 4:
            img = img[0]
        # Normalize the image.
        img = img - img.min()
        img = img/img.max()
        embed = cm.apply_cmap(embed, cmap=cmap)
        # Get grayscale version of image by taking the weighted average of the channels
        # as described in https://www.cs.virginia.edu/~vicente/recognition/notebooks/image_processing_lab.html#2.-Converting-to-Grayscale
        R,G,B = img
        img_gray = 0.21 * R + 0.72 * G + 0.07 * B
        # Get the upscaled size of the embedding, so as to take into account
        # the network's downscale caused by the stride.
        upsc_size = (embed.shape[-1] - 1) * self.up_factor + 1
        embed = F.interpolate(embed, upsc_size, mode='bilinear',
                              align_corners=False)
        # Pad the embedding with zeros to match the image dimensions. We pad
        # all 4 corners equally to keep the embedding centered.
        tot_pad = img.shape[-1] - upsc_size
        # Sanity check 1. The amount of padding must be equal on all sides, so
        # the total padding on any dimension must be an even integer.
        assert tot_pad % 2 == 0, "The embed or image dimensions are incorrect."
        pad = int(tot_pad/2)
        embed = F.pad(embed, (pad, pad, pad, pad), 'constant', 0)
        # Sanity check 2, the size of the embedding in the (H, w) dimensions
        # matches the size of the image.
        assert embed.shape[-2:] == img.shape[-2:], ("The embedding overlay "
                                                    "and image dimensions "
                                                    "do not agree.")
        final_imgs = alpha * embed + (1-alpha) * img_gray
        # The embedding_channel (or feature channel) dimension is treated like
        # a batch dimension, so the grid shows each individual embeding
        # overlayed with the input image. Plus the original image is also shown.
        # If add_ref is used the ref image is the first to be shown.
        img = img.unsqueeze(0)
        final_imgs = torch.cat((img, final_imgs))
        if add_ref is not None:
            # Pads the image if necessary
            pad = int((img.shape[-1] - add_ref.shape[-1])//2)
            add_ref = F.pad(add_ref, (pad, pad, pad, pad), 'constant', 0)
            add_ref = add_ref.unsqueeze(0)
            final_imgs = torch.cat((add_ref, final_imgs))
        final_imgs = make_grid(final_imgs, nrow=6)
        self.writer_val.add_image(tag, final_imgs, self.epoch)
Developer: vero1925, Project: Pytorch-SiamFC, Lines: 104, Source: summary_utils.py

Example 12: make_grid

# Required import: from tensorboardX import SummaryWriter [as alias]
# Or: from tensorboardX.SummaryWriter import add_image [as alias]
            # Backward the averaged gradient
            loss /= p['nAveGrad']
            loss.backward()
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            # Log images roughly 20 times per epoch. The original `ii % num_img_tr / 20 == 0`
            # only fired when ii % num_img_tr == 0 because of operator precedence.
            if ii % (num_img_tr // 20) == 0:
                grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                writer.add_image('image', grid_image)
                grid_image = make_grid(utils.decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy()), 3, normalize=False,
                                       range=(0, 255))
                writer.add_image('Predicted label', grid_image)
                grid_image = make_grid(utils.decode_seg_map_sequence(torch.squeeze(gts[:3], 1).detach().cpu().numpy()), 3, normalize=False, range=(0, 255))
                writer.add_image('Groundtruth label', grid_image)

        # Save the model
        if (epoch % snapshot) == snapshot - 1:
            torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
            print("Save model at {}\n".format(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')))

        # One testing epoch
        if useTest and epoch % nTestInterval == (nTestInterval - 1):
            net.eval()
            for ii, sample_batched in enumerate(testloader):
Developer: codes-kzhan, Project: pytorch-deeplab-xception, Lines: 32, Source: train.py


Note: The tensorboardX.SummaryWriter.add_image examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to each project's license; do not reproduce without permission.