

Python Variable.backward Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.backward. If you are wondering what Variable.backward does, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore other usage examples of the containing class, torch.autograd.Variable.


The following presents 7 code examples of the Variable.backward method, ordered by popularity.
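Before the collected examples, a minimal sketch of the basic pattern may be useful: build a scalar result from a Variable that requires gradients, call backward(), and read the accumulated gradient from .grad. The variable names below are illustrative only; the Variable wrapper is legacy API (merged into Tensor since PyTorch 0.4) but is kept here to match the style of the examples.

import torch
from torch.autograd import Variable

x = Variable(torch.ones(3), requires_grad=True)
y = torch.sum(x * 2)   # scalar output, so backward() needs no gradient argument
y.backward()           # populates x.grad with dy/dx
print(x.grad.data)     # prints [2, 2, 2]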

Example 1: meta_update

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
def meta_update(meta_init, meta_init_grads, meta_alpha, meta_alpha_grads, 
                meta_init_optimizer, meta_alpha_optimizer):
    # Unpack the list of grad dicts
    init_gradients = {k: sum(d[k] for d in meta_init_grads) for k in meta_init_grads[0].keys()}
    alpha_gradients = {k: sum(d[k] for d in meta_alpha_grads) for k in meta_alpha_grads[0].keys()}
    
    # dummy variable to mimic forward and backward
    dummy_x = Variable(torch.Tensor(np.random.randn(1)), requires_grad=False).cuda()
    
    # update meta_init (the initial weights)
    for k, init in meta_init.items():
        dummy_x = torch.sum(dummy_x * init)
    meta_init_optimizer.zero_grad()
    dummy_x.backward()
    for k, init in meta_init.items():
        init.grad = init_gradients[k]
    meta_init_optimizer.step()

    # update meta_alpha (the learning rates)
    dummy_y = Variable(torch.Tensor(np.random.randn(1)), requires_grad=False).cuda()
    for k, alpha in meta_alpha.items():
        dummy_y = torch.sum(dummy_y * alpha)
    meta_alpha_optimizer.zero_grad()
    dummy_y.backward()
    for k, alpha in meta_alpha.items():
        alpha.grad = alpha_gradients[k]
    meta_alpha_optimizer.step()
Developer: XHWXD, Project: meta_trackers, Lines: 29, Source file: train_meta_init.py

Example 2: eval_hess_vec_prod

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
def eval_hess_vec_prod(vec, params, net, criterion, dataloader, use_cuda=False):
    """
    Evaluate product of the Hessian of the loss function with a direction vector "vec".
    The product result is saved in the grad of net.

    Args:
        vec: a list of tensor with the same dimensions as "params".
        params: the parameter list of the net (ignoring biases and BN parameters).
        net: model with trained parameters.
        criterion: loss function.
        dataloader: dataloader for the dataset.
        use_cuda: use GPU.
    """

    if use_cuda:
        net.cuda()
        vec = [v.cuda() for v in vec]

    net.eval()
    net.zero_grad() # clears grad for every parameter in the net

    for batch_idx, (inputs, targets) in enumerate(dataloader):
        inputs, targets = Variable(inputs), Variable(targets)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()

        outputs = net(inputs)
        loss = criterion(outputs, targets)
        grad_f = torch.autograd.grad(loss, inputs=params, create_graph=True)

        # Compute inner product of gradient with the direction vector
        prod = Variable(torch.zeros(1)).type(type(grad_f[0].data))
        for (g, v) in zip(grad_f, vec):
            prod = prod + (g * v).cpu().sum()

        # Compute the Hessian-vector product, H*v
        # prod.backward() computes dprod/dparams for every parameter in params and
        # accumulates the gradients into the params' .grad attributes
        prod.backward()
Developer: anirband, Project: loss-landscape, Lines: 41, Source file: hess_vec_prod.py
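As a rough illustration of how this function might be driven, here is a hedged usage sketch; the tiny model, loss, data, and direction vector below are made-up stand-ins, not part of the original loss-landscape project. After the call, the Hessian-vector product sits in the .grad attribute of each entry of params.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

net = nn.Linear(10, 1)                                   # hypothetical tiny model
criterion = nn.MSELoss()
dataset = TensorDataset(torch.randn(32, 10), torch.randn(32, 1))
loader = DataLoader(dataset, batch_size=32)

params = [p for p in net.parameters() if p.dim() > 1]    # skip biases, per the docstring
vec = [torch.randn(*p.size()) for p in params]           # direction vector v

eval_hess_vec_prod(vec, params, net, criterion, loader, use_cuda=False)
hess_vec = [p.grad.data.clone() for p in params]         # H*v, one block per parameter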

Example 3: construct_multigraph

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
    g2, h2 = construct_multigraph(smile)
    
    for k in xrange(0, T):
      message_pass(g, h, k)

    x = readout(h, h2)
    #x = F.selu( fc(x) )
    y_hat = linear(x)
    y = train_labels[sample_index]

    y_hats_train.append(y_hat)

    error = (y_hat - y)*(y_hat - y) / Variable(torch.FloatTensor([BATCH_SIZE])).view(1, 1)
    train_loss = train_loss + error

  train_loss.backward()
  optimizer.step()

  if i % int(len(train_smiles) / BATCH_SIZE) == 0:
    val_loss = Variable(torch.zeros(1, 1), requires_grad=False)
    y_hats_val = []
    for j in xrange(0, len(val_smiles)):
      g, h = construct_multigraph(val_smiles[j])
      g2, h2 = construct_multigraph(val_smiles[j])

      for k in xrange(0, T):
        message_pass(g, h, k)

      x = readout(h, h2)
      #x = F.selu( fc(x) )
      y_hat = linear(x)
Developer: ktaneishi, Project: deepchem, Lines: 33, Source file: mpnn.py
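The pattern worth noting in this excerpt is that per-sample squared errors are summed into a single train_loss Variable and backward() is called once on the accumulated total. A stripped-down sketch of the same idea, with made-up model and data names:

import torch
from torch.autograd import Variable

w = Variable(torch.randn(3), requires_grad=True)            # hypothetical model parameters
optimizer = torch.optim.SGD([w], lr=0.01)

samples = [(torch.randn(3), torch.randn(1)) for _ in range(4)]

total_loss = Variable(torch.zeros(1))
for x, y in samples:
    y_hat = torch.sum(w * Variable(x)).view(1)
    total_loss = total_loss + (y_hat - Variable(y)) ** 2    # accumulate per-sample error

optimizer.zero_grad()
total_loss.backward()    # one backward pass through the whole accumulated graph
optimizer.step()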

Example 4: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
from torch.autograd import Variable
v = Variable(torch.Tensor([0, 0, 0]), requires_grad=True)
h = v.register_hook(lambda grad: grad * 2)  # double the gradient
v.backward(torch.Tensor([1, 1, 1]))
# the original gradient is computed first, then passed through the hook, yielding a new (doubled) gradient
print(v.grad.data)
v.grad.data.zero_()
print(v.grad.data)
v.backward(torch.Tensor([1, 1, 1]))
v.backward(torch.Tensor([1, 1, 1]))
print(v.grad.data)
h.remove()  # removes the hook
Developer: XuChongBo, Project: pydemo, Lines: 17, Source file: pytorch-grad.py
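register_hook is also commonly used to inspect or clip gradients as they flow back; a small hypothetical variant of the same pattern (names are illustrative, not from the project above):

import torch
from torch.autograd import Variable

w = Variable(torch.Tensor([1.0, 2.0, 3.0]), requires_grad=True)
handle = w.register_hook(lambda grad: grad.clamp(-0.5, 0.5))  # clip each gradient entry
loss = torch.sum(w * w)
loss.backward()
print(w.grad.data)   # raw gradients 2*w = [2, 4, 6] are clamped to [0.5, 0.5, 0.5]
handle.remove()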

Example 5: optimize_model

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]

#......... part of the code omitted .........
        action_batch = Variable(torch.cat(batch.action))
        reward_batch = Variable(torch.cat(batch.reward))
        next_states = Variable(torch.cat(batch.next_state))
        terminals=np.array(batch.terminated)

        # P[k, a, i] is the probability of atom z_i when action a is taken in the next state (for the kth sample)
        P0 = model0(next_states)
        P1 = model1(next_states)
        #print(torch.sum(P0[0]))
        #print(torch.sum(P1[0])) 
        # Q[k, a] is the value of action a (for the kth sample)
        #print(np.dot(P0.data.numpy(), self.z))
        Q = np.vstack((np.dot(P0.data.numpy(), self.z),np.dot(P1.data.numpy(), self.z))).T
        #print(Q)
        # A_[k] is the optimal action (for the kth sample)
        A_ = np.argmax(Q, axis=1)
        #print(A_)

        # Target vector
        M = np.zeros((BATCH_SIZE, self.action_count, self.n), dtype=np.float32)
        #print(reward_batch.data.numpy())
        # Compute projection onto the support (for terminal states, just reward)
        Tz = np.repeat(reward_batch.data.numpy().reshape(-1, 1), self.n, axis=1) + np.dot(self.gamma * (1.0 - terminals).reshape(-1, 1),
                                                                        self.z.reshape(1, -1))
        #print(self.gamma * (1.0 - terminals).reshape(-1, 1))
        
        # TODO: Verify correctness
        # Clipping to the endpoints as described in the paper causes probability mass to disappear (when B = L = U).
        # To avoid this, shift the endpoints slightly so that L and U are never both equal to B.
        Tz = np.clip(Tz, self.vmin + 0.01, self.vmax - 0.01)
        

        B = (Tz - self.vmin) / self.dz
        L = np.floor(B).astype(np.int32)
        U = np.ceil(B).astype(np.int32)

        # Distribute probability
        for i in range(BATCH_SIZE):
            for j in range(self.n):
                if(A_[i]==0):
                    M[i, A_[i], L[i, j]] += P0[i, j].data[0] * (U[i, j] - B[i, j])
                    M[i, A_[i], U[i, j]] += P0[i, j].data[0] * (B[i, j] - L[i, j])
                else:
                    M[i, A_[i], L[i, j]] += P1[i, j].data[0] * (U[i, j] - B[i, j])
                    M[i, A_[i], U[i, j]] += P1[i, j].data[0] * (B[i, j] - L[i, j])

                #M[i, 1-A_[i], L[i, j]] = P[i, 1-A_[i], j].data[0]
                #M[i, 1-A_[i], U[i, j]] = P[i, 1-A_[i], j].data[0]

        #print("P:")
        #print(P[0])
        #print(M[0])
        #print("M:")
        #print(M[0])


        #print(A_)
        #print(action_batch)
        action_mask = LongTensor(A_).view(BATCH_SIZE,1,1).expand(BATCH_SIZE,1,9)
        #print(action_mask)
        
        q_probs0 = model0(state_batch)
        q_probs1 = model1(state_batch)
        #print(q_probs0[0])
        #print(q_probs1[0])
        qa_probs = [q_probs0[i] if action_batch[i].data[0] == 0 else q_probs1[i] for i in range(BATCH_SIZE)]
        #print(qa_probs[0])

        #criterion = nn.BCEWithLogitsLoss()
        #print(P.view(BATCH_SIZE,18)[0])
        #print(qa_probs[0])
        #print(M)
        matrix = Variable(Tensor(M)).gather(1, action_mask).squeeze()
        #print(matrix[0])
        #print(matrix[0])

        loss0=Variable(Tensor([0.0]))
        loss1=Variable(Tensor([0.0]))
        counter=0
        for i in range(BATCH_SIZE):
            #print(matrix[i] * torch.log(qa_probs[i]))
            #print(torch.sum(matrix[i] * torch.log(qa_probs[i])))
            #print(M[i][A_[i]])
            #print(matrix[i])
            if action_batch[i].data[0]==0:
                loss0 -= torch.sum(matrix[i] * torch.log(qa_probs[i]))
                counter+=1
            else:
                loss1 -= torch.sum(matrix[i] * torch.log(qa_probs[i]))

        #print(loss0)
        #print(loss1)
        if(counter>0):
            optimizer0.zero_grad()
            loss0.backward()
            optimizer0.step()
        if(counter<BATCH_SIZE):
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()
Developer: ziebalp, Project: distributional_dqn, Lines: 104, Source file: cat_dqn_2.py
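The heart of this update is the categorical (C51-style) projection: each target atom Tz = r + gamma*z is clipped to the support range and its probability mass is split between the two nearest support atoms in proportion to distance. A small standalone numpy sketch of that projection for a single transition, with made-up support values; it keeps the same 0.01 endpoint shift as the code above and, like that code, can still lose mass if an interior B lands exactly on an atom:

import numpy as np

vmin, vmax, n_atoms = -1.0, 1.0, 5
z = np.linspace(vmin, vmax, n_atoms)            # support atoms
dz = (vmax - vmin) / (n_atoms - 1)
p = np.array([0.1, 0.2, 0.4, 0.2, 0.1])         # next-state distribution over atoms
r, gamma, terminal = 0.3, 0.9, 0.0              # hypothetical transition

Tz = np.clip(r + gamma * (1.0 - terminal) * z, vmin + 0.01, vmax - 0.01)
B = (Tz - vmin) / dz                            # fractional index of each target atom
L, U = np.floor(B).astype(int), np.ceil(B).astype(int)

m = np.zeros(n_atoms)                           # projected target distribution
for j in range(n_atoms):
    m[L[j]] += p[j] * (U[j] - B[j])             # mass assigned to the lower neighbour
    m[U[j]] += p[j] * (B[j] - L[j])             # mass assigned to the upper neighbour

print(m, m.sum())                               # sums to 1 for this example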

Example 6: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
        NNs = Ns.float().unsqueeze(1).expand(bs,N)
        NNs = torch.ge(NNs, torch.arange(1,N+1).type(dtype).unsqueeze(0).expand(bs,N)).float()
        
        if test:
            loss = Variable(torch.zeros(1).type(dtype))
            w, c, (Ns, NNs) = execute(Knap, scales, weights, volumes, C,(Ns, NNs),  n_samples, 'test')
        else:
            loss, w, c, (Ns, NNs) = execute(Knap, scales, weights, volumes, C,(Ns, NNs),  n_samples, 'train')
        
        trivial_w = trivial_algorithm(weights.data, volumes.data, C.data)
        
        
        
        if not test:
            Knap.zero_grad()
            loss.backward()
            nn.utils.clip_grad_norm(Knap.parameters(), clip_grad_norm)
            optimizer.step()
        
        log.add('w', w.data.mean())
        log.add('tw', trivial_w.mean())
        log.add('opt', OptW.data.mean())
        log.add('loss', loss.data.cpu().numpy()[0])
        log.add('ratioW', (OptW.data/w.data).mean())
        log.add('ratioT', (OptW.data/trivial_w).mean())

        if not test:
            if it%50 == 0:
                elapsed = time.time() - start
                loss = log.get('loss').mean()
                w = log.get('w').mean()
Developer: ParsonsZeng, Project: DiCoNet, Lines: 33, Source file: knapsack.py

Example 7: print

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import backward [as alias]
out.backward()
print('*' * 10)
print('=====simple gradient======')
print('input')
print(a.data)
print('compute result is')
print(out.data[0])
print('input gradients are')
print(a.grad.data)

# backward on non-scalar output
m = Variable(torch.FloatTensor([[2, 3]]), requires_grad=True)
n = Variable(torch.zeros(1, 2))
n[0, 0] = m[0, 0]**2
n[0, 1] = m[0, 1]**3
n.backward(torch.FloatTensor([[1, 1]]))
print('*' * 10)
print('=====non scalar output======')
print('input')
print(m.data)
print('input gradients are')
print(m.grad.data)

# jacobian
j = torch.zeros(2, 2)
k = Variable(torch.zeros(1, 2))
m.grad.data.zero_()
k[0, 0] = m[0, 0]**2 + 3 * m[0, 1]
k[0, 1] = m[0, 1]**2 + 2 * m[0, 0]
k.backward(torch.FloatTensor([[1, 0]]), retain_variables=True)
j[:, 0] = m.grad.data
Developer: Raven013, Project: code-of-learn-deep-learning-with-pytorch, Lines: 33, Source file: backward.py
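The excerpt stops after filling the first Jacobian column; retain_variables=True (retain_graph=True in later PyTorch) keeps the graph alive precisely so that a second backward call can fill the remaining column. A plausible continuation, not shown in the excerpt above:

m.grad.data.zero_()
k.backward(torch.FloatTensor([[0, 1]]))   # second unit vector selects d k[0,1] / d m
j[:, 1] = m.grad.data
print('jacobian matrix is')
print(j)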


Note: The torch.autograd.Variable.backward examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.