

Python Variable.detach Method Code Examples

This article collects typical usage examples of the torch.autograd.Variable.detach method in Python. If you are wondering how Variable.detach works, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the containing class, torch.autograd.Variable.


Six code examples of the Variable.detach method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
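Before working through the examples, it helps to recall what detach does: it returns a new Variable (a Tensor in PyTorch 0.4 and later) that shares the same underlying data but is cut out of the autograd graph, so no gradients flow back through it. The following minimal sketch is not taken from any of the projects below; it only illustrates the behavior and assumes a PyTorch build where the legacy Variable wrapper is still available.

# Minimal sketch of Variable.detach (illustrative only, not from the examples below).
import torch
from torch.autograd import Variable

x = Variable(torch.randn(3), requires_grad=True)
y = x * 2                        # y participates in the autograd graph
y_det = y.detach()               # shares data with y, but requires_grad is False,
                                 # so no gradient flows back to x through y_det
loss = (y_det * x).sum()         # x still gets a gradient from its direct use here
loss.backward()
print(y.requires_grad)           # True
print(y_det.requires_grad)       # False
print(x.grad)                    # equals y_det's values: y_det was treated as a constant

This is also the pattern several of the examples below rely on, for instance when a generator's output is fed to a discriminator via detach so that the discriminator loss does not backpropagate into the generator.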

Example 1: _train_on_instance_mixup

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
 def _train_on_instance_mixup(self, z, x, **kwargs):
     """Perform mixup in the pixel space"""
     self._train()
     x.requires_grad = True # for dnorm
     # Train the generator.
     self.optim['g'].zero_grad()
     alpha = self.sample_lambda(x.size(0))
     fake = self.g(z)
     xz = Variable(alpha*x.data + (1.-alpha)*fake.data)
     if self.mixup_ff:
         perm = torch.randperm(fake.size(0)).view(-1).long()
         fake_perm = fake[perm]
         xz_ff = Variable(alpha*fake.data + (1.-alpha)*fake_perm.data)
     _, d_fake = self.d(fake)
     gen_loss = self.g_loss(d_fake)
     if (kwargs['iter']-1) % self.update_g_every == 0:
         gen_loss.backward()
         self.optim['g'].step()
     # Train the discriminator.
     self.optim['d'].zero_grad()
     _, d_xz = self.d(xz.detach())
     _, d_real = self.d(x)
     _, d_fake = self.d(fake.detach())
     d_loss = self.d_loss_fake(d_xz) + self.d_loss_real(d_real) + \
              self.d_loss_fake(d_fake)
     if self.mixup_ff:
         _, d_xz_ff = self.d(xz_ff.detach())
         d_loss += self.d_loss_fake(d_xz_ff)
     d_loss.backward()
     self.optim['d'].step()
     ##################################
     # Also compute the gradient norm.
     # Grad norm for D_REAL
     _, d_real = self.d(x)
     g_norm_x = self.grad_norm(d_real, x)
     if self.dnorm > 0.:
         self.optim['d'].zero_grad()
         (g_norm_x*self.dnorm).backward()
         self.optim['d'].step()
     self.optim['d'].zero_grad()
     ##################################
     losses = {
         'g_loss': gen_loss.data.item(),
         'd_loss': d_loss.data.item(),
         'd_real_norm': g_norm_x.data.item(),
     }
     outputs = {
         'x': x.detach(),
         'gz': fake.detach(),
     }
     return losses, outputs
Developer: kazk1018, Project: manifold_mixup, Lines of code: 53, Source file: mugan.py

Example 2: iter_discrete_traces

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
def iter_discrete_traces(graph_type, fn, *args, **kwargs):
    """
    Iterate over all discrete choices of a stochastic function.

    When sampling continuous random variables, this behaves like `fn`.
    When sampling discrete random variables, this iterates over all choices.

    This yields `(scale, trace)` pairs, where `scale` is the probability of the
    discrete choices made in the `trace`.

    :param str graph_type: The type of the graph, e.g. "flat" or "dense".
    :param callable fn: A stochastic function.
    :returns: An iterator over (scale, trace) pairs.
    """
    queue = LifoQueue()
    queue.put(Trace())
    while not queue.empty():
        partial_trace = queue.get()
        escape_fn = functools.partial(util.discrete_escape, partial_trace)
        traced_fn = poutine.trace(poutine.escape(poutine.replay(fn, partial_trace), escape_fn),
                                  graph_type=graph_type)
        try:
            full_trace = traced_fn.get_trace(*args, **kwargs)
        except util.NonlocalExit as e:
            for extended_trace in util.enum_extend(traced_fn.trace.copy(), e.site):
                queue.put(extended_trace)
            continue

        # Scale trace by probability of discrete choices.
        log_pdf = full_trace.batch_log_pdf(site_filter=site_is_discrete)
        if isinstance(log_pdf, float):
            log_pdf = torch.Tensor([log_pdf])
        if isinstance(log_pdf, torch.Tensor):
            log_pdf = Variable(log_pdf)
        scale = torch.exp(log_pdf.detach())
        yield scale, full_trace
Developer: Magica-Chen, Project: pyro, Lines of code: 38, Source file: enum.py

Example 3: rollout

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
    def rollout(self):
        obs = np.array(self.env.reset())
        batch_size = len(obs)

        # Reorder the language input for the encoder
        seq, seq_mask, seq_lengths, perm_idx = self._sort_batch(obs)
        perm_obs = obs[perm_idx]

        # Record starting point
        traj = [{
            'instr_id': ob['instr_id'],
            'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]
        } for ob in perm_obs]

        # Forward through encoder, giving initial hidden state and memory cell for decoder
        ctx,h_t,c_t = self.encoder(seq, seq_lengths)

        # Initial action
        a_t = Variable(torch.ones(batch_size).long() * self.model_actions.index('<start>'), 
                    requires_grad=False).cuda()
        ended = np.array([False] * batch_size) # Indices match permutation of the model, not env

        # Do a sequence rollout and calculate the loss
        self.loss = 0
        env_action = [None] * batch_size
        for t in range(self.episode_len):

            f_t = self._feature_variable(perm_obs) # Image features from obs
            h_t,c_t,alpha,logit = self.decoder(a_t.view(-1, 1), f_t, h_t, c_t, ctx, seq_mask)
            # Mask outputs where agent can't move forward
            for i,ob in enumerate(perm_obs):
                if len(ob['navigableLocations']) <= 1:
                    logit[i, self.model_actions.index('forward')] = -float('inf')             

            # Supervised training
            target = self._teacher_action(perm_obs, ended)
            self.loss += self.criterion(logit, target)

            # Determine next model inputs
            if self.feedback == 'teacher': 
                a_t = target                # teacher forcing
            elif self.feedback == 'argmax': 
                _,a_t = logit.max(1)        # student forcing - argmax
                a_t = a_t.detach()
            elif self.feedback == 'sample':
                probs = F.softmax(logit, dim=1)
                m = D.Categorical(probs)
                a_t = m.sample()            # sampling an action from model
            else:
                sys.exit('Invalid feedback option')

            # Updated 'ended' list and make environment action
            for i,idx in enumerate(perm_idx):
                action_idx = a_t[i].data[0]
                if action_idx == self.model_actions.index('<end>'):
                    ended[i] = True
                env_action[idx] = self.env_actions[action_idx]

            obs = np.array(self.env.step(env_action))
            perm_obs = obs[perm_idx]

            # Save trajectory output
            for i,ob in enumerate(perm_obs):
                if not ended[i]:
                    traj[i]['path'].append((ob['viewpoint'], ob['heading'], ob['elevation']))

            # Early exit if all ended
            if ended.all(): 
                break

        self.losses.append(self.loss.data[0] / self.episode_len)
        return traj
Developer: volkancirik, Project: Matterport3DSimulator, Lines of code: 74, Source file: agent.py

Example 4: train

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
def train(train_loader, model, ema_model, optimizer, epoch, log):
    global global_step

    class_criterion = nn.CrossEntropyLoss(size_average=False, ignore_index=NO_LABEL).cuda()
    if args.consistency_type == 'mse':
        consistency_criterion = losses.softmax_mse_loss
    elif args.consistency_type == 'kl':
        consistency_criterion = losses.softmax_kl_loss
    else:
        assert False, args.consistency_type
    residual_logit_criterion = losses.symmetric_mse_loss

    meters = AverageMeterSet()

    # switch to train mode
    model.train()
    ema_model.train()

    end = time.time()
    for i, ((input, ema_input), target) in enumerate(train_loader):
        # measure data loading time
        meters.update('data_time', time.time() - end)

        adjust_learning_rate(optimizer, epoch, i, len(train_loader))
        meters.update('lr', optimizer.param_groups[0]['lr'])

        input_var = torch.autograd.Variable(input)
        ema_input_var = torch.autograd.Variable(ema_input, volatile=True)
        target_var = torch.autograd.Variable(target.cuda(async=True))

        minibatch_size = len(target_var)
        labeled_minibatch_size = target_var.data.ne(NO_LABEL).sum()
        assert labeled_minibatch_size > 0
        meters.update('labeled_minibatch_size', labeled_minibatch_size)

        ema_model_out = ema_model(ema_input_var)
        model_out = model(input_var)

        if isinstance(model_out, Variable):
            assert args.logit_distance_cost < 0
            logit1 = model_out
            ema_logit = ema_model_out
        else:
            assert len(model_out) == 2
            assert len(ema_model_out) == 2
            logit1, logit2 = model_out
            ema_logit, _ = ema_model_out

        ema_logit = Variable(ema_logit.detach().data, requires_grad=False)

        if args.logit_distance_cost >= 0:
            class_logit, cons_logit = logit1, logit2
            res_loss = args.logit_distance_cost * residual_logit_criterion(class_logit, cons_logit) / minibatch_size
            meters.update('res_loss', res_loss.data[0])
        else:
            class_logit, cons_logit = logit1, logit1
            res_loss = 0

        class_loss = class_criterion(class_logit, target_var) / minibatch_size
        meters.update('class_loss', class_loss.data[0])

        ema_class_loss = class_criterion(ema_logit, target_var) / minibatch_size
        meters.update('ema_class_loss', ema_class_loss.data[0])

        if args.consistency:
            consistency_weight = get_current_consistency_weight(epoch)
            meters.update('cons_weight', consistency_weight)
            consistency_loss = consistency_weight * consistency_criterion(cons_logit, ema_logit) / minibatch_size
            meters.update('cons_loss', consistency_loss.data[0])
        else:
            consistency_loss = 0
            meters.update('cons_loss', 0)

        loss = class_loss + consistency_loss + res_loss
        assert not (np.isnan(loss.data[0]) or loss.data[0] > 1e5), 'Loss explosion: {}'.format(loss.data[0])
        meters.update('loss', loss.data[0])

        prec1, prec5 = accuracy(class_logit.data, target_var.data, topk=(1, 5))
        meters.update('top1', prec1[0], labeled_minibatch_size)
        meters.update('error1', 100. - prec1[0], labeled_minibatch_size)
        meters.update('top5', prec5[0], labeled_minibatch_size)
        meters.update('error5', 100. - prec5[0], labeled_minibatch_size)

        ema_prec1, ema_prec5 = accuracy(ema_logit.data, target_var.data, topk=(1, 5))
        meters.update('ema_top1', ema_prec1[0], labeled_minibatch_size)
        meters.update('ema_error1', 100. - ema_prec1[0], labeled_minibatch_size)
        meters.update('ema_top5', ema_prec5[0], labeled_minibatch_size)
        meters.update('ema_error5', 100. - ema_prec5[0], labeled_minibatch_size)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        update_ema_variables(model, ema_model, args.ema_decay, global_step)

        # measure elapsed time
        meters.update('batch_time', time.time() - end)
        end = time.time()

#......... part of the code is omitted here .........
Developer: ys2899, Project: mean-teacher, Lines of code: 103, Source file: main.py

Example 5: test_ais

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
def test_ais(model, data_x, batch_size, display, k, n_intermediate_dists):


    def intermediate_dist(t, z, mean, logvar, zeros, batch):
        # logp1 = lognormal(z, mean, logvar)  #[P,B]
        log_prior = lognormal(z, zeros, zeros)  #[P,B]
        log_likelihood = log_bernoulli(model.decode(z), batch)
        # logpT = log_prior + log_likelihood
        # log_intermediate_2 = (1-float(t))*logp1 + float(t)*logpT

        log_intermediate_2 = log_prior + float(t)*log_likelihood

        return log_intermediate_2


    def hmc(z, intermediate_dist_func):

        if torch.cuda.is_available():
            v = Variable(torch.FloatTensor(z.size()).normal_(), volatile=volatile_, requires_grad=requires_grad).cuda()
        else:
            v = Variable(torch.FloatTensor(z.size()).normal_()) 

        v0 = v
        z0 = z

        # print (intermediate_dist_func(z))
        # fasdf
        gradients = torch.autograd.grad(outputs=intermediate_dist_func(z), inputs=z,
                          grad_outputs=grad_outputs,
                          create_graph=True, retain_graph=retain_graph, only_inputs=True)[0]

        gradients = gradients.detach()

        v = v + .5 *step_size*gradients
        z = z + step_size*v

        for LF_step in range(n_HMC_steps):

            # log_intermediate_2 = intermediate_dist(t1, z, mean, logvar, zeros, batch)
            gradients = torch.autograd.grad(outputs=intermediate_dist_func(z), inputs=z,
                              grad_outputs=grad_outputs,
                              create_graph=True, retain_graph=retain_graph, only_inputs=True)[0]
            gradients = gradients.detach()
            v = v + step_size*gradients
            z = z + step_size*v

        # log_intermediate_2 = intermediate_dist(t1, z, mean, logvar, zeros, batch)
        gradients = torch.autograd.grad(outputs=intermediate_dist_func(z), inputs=z,
                          grad_outputs=grad_outputs,
                          create_graph=True, retain_graph=retain_graph, only_inputs=True)[0]
        gradients = gradients.detach()
        v = v + .5 *step_size*gradients

        return z0, v0, z, v


    def mh_step(z0, v0, z, v, step_size, intermediate_dist_func):

        logpv0 = lognormal(v0, zeros, zeros) #[P,B]
        hamil_0 =  intermediate_dist_func(z0) + logpv0
        
        logpvT = lognormal(v, zeros, zeros) #[P,B]
        hamil_T = intermediate_dist_func(z) + logpvT

        accept_prob = torch.exp(hamil_T - hamil_0)

        if torch.cuda.is_available():
            rand_uni = Variable(torch.FloatTensor(accept_prob.size()).uniform_(), volatile=volatile_, requires_grad=requires_grad).cuda()
        else:
            rand_uni = Variable(torch.FloatTensor(accept_prob.size()).uniform_())

        accept = accept_prob > rand_uni

        if torch.cuda.is_available():
            accept = accept.type(torch.FloatTensor).cuda()
        else:
            accept = accept.type(torch.FloatTensor)
        
        accept = accept.view(k, model.B, 1)

        z = (accept * z) + ((1-accept) * z0)

        #Adapt step size
        avg_acceptance_rate = torch.mean(accept)

        if avg_acceptance_rate.cpu().data.numpy() > .65:
            step_size = 1.02 * step_size
        else:
            step_size = .98 * step_size

        if step_size < 0.0001:
            step_size = 0.0001
        if step_size > 0.5:
            step_size = 0.5

        return z, step_size




#......... part of the code is omitted here .........
Developer: chriscremer, Project: Other_Code, Lines of code: 103, Source file: ais3.py

Example 6: range

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import detach [as alias]
for epoch in range(args.epoch):
    for iterate in range(train_len):
    
        for i in range(batchsize):
            batch = input_data.get_image(iterate)
            input_x_np[i,:] = np.asarray(batch[0])
            input_real_np[i,:] = np.asarray(batch[1])

        input_x = Variable(torch.from_numpy(input_x_np)).cuda()
        input_real = Variable(torch.from_numpy(input_real_np)).cuda()

        out_generator_G = generator_G.forward(input_x)

        optimizer_D.zero_grad()
        negative_examples = discriminator_D.forward(input_x.detach(), out_generator_G.detach())
        positive_examples = discriminator_D.forward(input_x, input_real)
        loss_dis = 0.5 * ( loss_binaryCrossEntropy(positive_examples, Variable(torch.ones(positive_examples.size())).cuda()) \
                          +loss_binaryCrossEntropy(negative_examples, Variable(torch.zeros(negative_examples.size())).cuda()))
        loss_dis.backward(retain_variables=True)
        optimizer_D.step()

        optimizer_G.zero_grad()
        negative_examples = discriminator_D.forward(input_x, out_generator_G)
        loss_gen = loss_binaryCrossEntropy(negative_examples, Variable(torch.ones(negative_examples.size())).cuda()) \
                  +loss_L1(out_generator_G, input_real) * args.lambda1
        loss_gen.backward()
        optimizer_G.step()

        if iterate % args.iterate == 0:
            print ('{} [{}/{}] LossGen= {} LossDis= {}'.format(iterate, epoch+1, args.epoch, loss_gen.data[0], loss_dis.data[0]))
Developer: kwangjinoh, Project: simple-pix2pix-pytorch, Lines of code: 32, Source file: train.py


Note: The torch.autograd.Variable.detach examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.